diff --git a/deployment/docker-compose.mssql.yml b/deployment/docker-compose.mssql.yml new file mode 100644 index 000000000..2b1a17e32 --- /dev/null +++ b/deployment/docker-compose.mssql.yml @@ -0,0 +1,247 @@ +# ============================================================ +# Sensenet lokális fejlesztői docker-compose +# ============================================================ +# Szolgáltatások: +# mssql – MS SQL Server 2022 Express (named volume) +# snauth – SnAuth identity server (DockerHub image) +# snapp – Sensenet API (lokális forrásból build-elt image) +# +# ── Dev-cert generálása (csak egyszer szükséges) ────────────── +# mkdir -p ./volumes/certificates +# dotnet dev-certs https -ep ./volumes/certificates/snapp.pfx \ +# -p SuP3rS3CuR3P4sSw0Rd +# dotnet dev-certs https --trust +# +# ── Indítás ─────────────────────────────────────────────────── +# docker compose -f docker-compose.mssql.yml up -d +# (első indításnál az snapp image build-elése eltarthat egy ideig) +# ============================================================ + +services: + + # ────────────────────────────────────────────────────────── + # MS SQL Server 2022 Express + # Elérhető a hoston: localhost,9999 + # ────────────────────────────────────────────────────────── + mssql: + image: mcr.microsoft.com/mssql/server:2022-latest + container_name: sensenet-snsql + environment: + ACCEPT_EULA: "Y" + MSSQL_SA_PASSWORD: "SuP3rS3CuR3P4sSw0Rd" + MSSQL_PID: "Express" + ports: + - "9999:1433" + volumes: + # Teljes /var/opt/mssql mount szükséges, különben az initdb nem fér hozzá + - mssql-data:/var/opt/mssql + networks: + - sensenet + healthcheck: + test: [ + "CMD-SHELL", + "/opt/mssql-tools18/bin/sqlcmd -S localhost -U sa -P 'SuP3rS3CuR3P4sSw0Rd' -Q 'SELECT 1' -No || exit 1" + ] + interval: 10s + timeout: 5s + retries: 10 + start_period: 40s + + # ────────────────────────────────────────────────────────── + # MSSQL init – létrehozza az üres adatbázist (egyszer fut le) + # ────────────────────────────────────────────────────────── 
+ mssql-init: + image: mcr.microsoft.com/mssql/server:2022-latest + container_name: sensenet-snsql-init + networks: + - sensenet + depends_on: + mssql: + condition: service_healthy + restart: "no" + entrypoint: ["/bin/bash", "-c"] + command: + - | + /opt/mssql-tools18/bin/sqlcmd -S sensenet-snsql -U sa -P 'SuP3rS3CuR3P4sSw0Rd' -No -Q \ + "IF NOT EXISTS (SELECT name FROM sys.databases WHERE name = 'sensenet-sndb') CREATE DATABASE [sensenet-sndb]" + + # ────────────────────────────────────────────────────────── + # DB Reset – csak --profile reset esetén fut + # Törli és újra létrehozza a sensenet-sndb adatbázist, + # valamint a Lucene indexet. + # ────────────────────────────────────────────────────────── + db-reset: + image: mcr.microsoft.com/mssql/server:2022-latest + container_name: sensenet-db-reset + profiles: ["reset"] + networks: + - sensenet + depends_on: + mssql: + condition: service_healthy + restart: "no" + entrypoint: ["/bin/bash", "-c"] + command: + - | + echo '🗑️ Dropping database sensenet-sndb...' + /opt/mssql-tools18/bin/sqlcmd -S sensenet-snsql -U sa -P 'SuP3rS3CuR3P4sSw0Rd' -No -Q \ + "IF EXISTS (SELECT name FROM sys.databases WHERE name = 'sensenet-sndb') + BEGIN + ALTER DATABASE [sensenet-sndb] SET SINGLE_USER WITH ROLLBACK IMMEDIATE; + DROP DATABASE [sensenet-sndb]; + END" + echo '✅ Database dropped.' + echo '🆕 Creating empty database sensenet-sndb...' + /opt/mssql-tools18/bin/sqlcmd -S sensenet-snsql -U sa -P 'SuP3rS3CuR3P4sSw0Rd' -No -Q \ + "CREATE DATABASE [sensenet-sndb]" + echo '✅ Database created.' + echo '🗑️ Clearing Lucene index...' + rm -rf /app-data/LocalIndex/* + echo '✅ Index cleared. Fresh install will run on next snapp start.' 
+ volumes: + - ./App_Data:/app-data + + # ────────────────────────────────────────────────────────── + # SnAuth – identity / JWT server + # DockerHub: sensenetcsp/sn-auth:preview + # HTTPS elérhető a hoston: https://localhost:44311 + # ────────────────────────────────────────────────────────── + snauth: + image: sensenetcsp/sn-auth:preview + container_name: sensenet-snis + environment: + ASPNETCORE_URLS: "https://+:443;http://+:80" + ASPNETCORE_ENVIRONMENT: "Development" + + # ── Sensenet repository (container-to-container) ───── + # Publikus URL (böngészőből / kliensből elérhető) + Sensenet__Repository__Url: "https://localhost:44362" + # Belső URL: az snauth container ezen éri el az snapp containert + Sensenet__Repository__InnerUrl: "http://sensenet-snapp" + Sensenet__Repository__Authentication__ApiKey: "pr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Ted" + + # ── JWT beállítások ─────────────────────────────────── + JwtSettings__Issuer: "https://localhost:44311" + JwtSettings__Audience: "sensenet" + JwtSettings__SecretKey: "pr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Ted" + JwtSettings__AuthTokenExpiryMinutes: "300" + JwtSettings__MultiFactorAuthExpiryMinutes: "300" + JwtSettings__TokenExpiryMinutes: "300" + JwtSettings__RefreshTokenExpiryDays: "15" + PasswordRecovery__TokenExpiryMinutes: "60" + Registration__IsEnabled: "false" + Recaptcha__SiteKey: "" + Recaptcha__SecretKey: "" + + # ── Alkalmazás / CORS ───────────────────────────────── + Application__Url: "https://localhost:44311" + Application__AllowedHosts__0: "https://adminui.test.sensenet.com" + Application__AllowedHosts__1: "https://localhost:44362" + Application__AllowedHosts__2: "http://sensenet-snapp" + + # ── Dev cert (mount-olt) ────────────────────────────── + Kestrel__Certificates__Default__Path: "/root/.aspnet/https/snapp.pfx" + Kestrel__Certificates__Default__Password: "SuP3rS3CuR3P4sSw0Rd" + + ports: + - "44311:443" + volumes: + - 
./volumes/certificates:/root/.aspnet/https:ro + networks: + - sensenet + depends_on: + snapp: + condition: service_started + + # ────────────────────────────────────────────────────────── + # Sensenet API – lokális forrásból build-elt image + # Build context: ../src (a Dockerfile innen dolgozik) + # HTTPS elérhető a hoston: https://localhost:44362 + # ────────────────────────────────────────────────────────── + snapp: + build: + context: ../src + dockerfile: WebApps/SnWebApplication.Api.Sql.TokenAuth/Dockerfile + pull_policy: build + container_name: sensenet-snapp + environment: + ASPNETCORE_URLS: "https://+:443;http://+:80" + ASPNETCORE_ENVIRONMENT: "Development" + + # ── Adatbázis (mssql container) ─────────────────────── + ConnectionStrings__SnCrMsSql: "Persist Security Info=False;Initial Catalog=sensenet-sndb;Data Source=sensenet-snsql;User ID=sa;Password=SuP3rS3CuR3P4sSw0Rd;TrustServerCertificate=true" + + # ── SNAuth beállítások ──────────────────────────────── + sensenet__authentication__authServerType: "SNAuth" + # A publikus authority (böngésző/kliens számára, pl. 
token validáláshoz) + sensenet__authentication__authority: "https://localhost:44311" + # Belső URL: snapp container ezen éri el az snauth containert (metadata, health) + sensenet__authentication__metadatahost: "http://sensenet-snis" + sensenet__authentication__repositoryUrl: "https://localhost:44362" + sensenet__authentication__AddJwtCookie: "false" + sensenet__identityManagement__UserProfilesEnabled: "false" + sensenet__repository__Authentication__ApiKey: "pr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Ted" + + # ── Egyéb ───────────────────────────────────────────── + sensenet__apikeys__healthcheckeruser: "supaS3CUp4ss" + sensenet__install__mssql__EnableFirstInstallDB: "true" + # Lucene in-proc index könyvtár (az App_Data volume-on belül) + sensenet__indexing__IndexDirectoryPath: "/app/App_Data/LocalIndex" + + # ── Dev cert (mount-olt) ────────────────────────────── + Kestrel__Certificates__Default__Path: "/root/.aspnet/https/snapp.pfx" + Kestrel__Certificates__Default__Password: "SuP3rS3CuR3P4sSw0Rd" + + ports: + - "44362:443" + volumes: + - ./volumes/certificates:/root/.aspnet/https:ro + - ./App_Data:/app/App_Data + networks: + - sensenet + depends_on: + mssql-init: + condition: service_completed_successfully + + # ────────────────────────────────────────────────────────── + # API Key helper – kiírja az admin API key-t a konzolra + # Használat: + # docker compose -f docker-compose.mssql.yml run --rm apikey + # ────────────────────────────────────────────────────────── + apikey: + image: mcr.microsoft.com/mssql/server:2022-latest + container_name: sensenet-apikey + profiles: ["tools"] + networks: + - sensenet + depends_on: + mssql: + condition: service_healthy + restart: "no" + entrypoint: ["/bin/bash", "-c"] + command: + - | + KEY=$$(/opt/mssql-tools18/bin/sqlcmd -S sensenet-snsql -U sa -P 'SuP3rS3CuR3P4sSw0Rd' -d sensenet-sndb -No -h -1 -W -Q \ + "SET NOCOUNT ON; SELECT TOP 1 Value FROM AccessTokens WHERE UserId = 1 AND 
ExpirationDate > GETDATE() ORDER BY CreationDate DESC" 2>/dev/null | head -1 | tr -d '[:space:]') + if [ -n "$$KEY" ]; then + echo "" + echo "══════════════════════════════════════════════════" + echo " 🔑 Admin API Key (MSSQL)" + echo "══════════════════════════════════════════════════" + echo " $$KEY" + echo "══════════════════════════════════════════════════" + echo "" + else + echo "❌ No API key found. Is sensenet running?" + exit 1 + fi + +# ── Named volumes ────────────────────────────────────────────── +volumes: + mssql-data: + driver: local + +# ── Hálózat ──────────────────────────────────────────────────── +networks: + sensenet: + driver: bridge diff --git a/deployment/docker-compose.postgres.yml b/deployment/docker-compose.postgres.yml new file mode 100644 index 000000000..c6143d842 --- /dev/null +++ b/deployment/docker-compose.postgres.yml @@ -0,0 +1,239 @@ +# ============================================================ +# Sensenet lokális fejlesztői docker-compose (PostgreSQL) +# ============================================================ +# Szolgáltatások: +# postgres - PostgreSQL 16 (named volume) +# pgadmin - pgAdmin 4 (webes felület) +# snauth - SnAuth identity server (DockerHub image) +# snapp - Sensenet API (lokális forrásból build-elt image) +# +# ── Dev-cert generálása (csak egyszer szükséges) ────────────── +# mkdir -p ./volumes/certificates +# dotnet dev-certs https -ep ./volumes/certificates/snapp.pfx \ +# -p SuP3rS3CuR3P4sSw0Rd +# dotnet dev-certs https --trust +# +# ── Indítás ─────────────────────────────────────────────────── +# docker compose -f docker-compose.postgres.yml up -d +# +# ── Tiszta újratelepítés (DB törlés + index törlés) ────────── +# docker compose -f docker-compose.postgres.yml --profile reset up -d +# ============================================================ + +services: + + # ────────────────────────────────────────────────────────── + # PostgreSQL 16 + # Elérhető a hoston: localhost:5532 + # 
────────────────────────────────────────────────────────── + postgres: + image: postgres:16 + container_name: sensenet-postgres + command: ["postgres", "-c", "max_connections=300"] + environment: + POSTGRES_USER: "postgres" + POSTGRES_PASSWORD: "SuP3rS3CuR3P4sSw0Rd" + POSTGRES_DB: "sensenet-sndb" + ports: + - "5532:5432" + volumes: + - postgres-data:/var/lib/postgresql/data + networks: + - sensenet + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres -d sensenet-sndb"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 10s + + # ────────────────────────────────────────────────────────── + # pgAdmin 4 + # Elérhető a hoston: http://localhost:5433 + # ────────────────────────────────────────────────────────── + pgadmin: + image: dpage/pgadmin4 + container_name: sensenet-pgadmin + environment: + PGADMIN_DEFAULT_EMAIL: "admin@sensenet.com" + PGADMIN_DEFAULT_PASSWORD: "admin" + PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: "False" + PGADMIN_SERVER_JSON_FILE: "/pgadmin4/servers.json" + PGADMIN_REPLACE_SERVERS_ON_STARTUP: "True" + entrypoint: > + /bin/sh -c " + /venv/bin/python3 /pgadmin4/setup-password.py & + exec /entrypoint.sh" + ports: + - "5433:80" + volumes: + - ./volumes/pgadmin/servers.json:/pgadmin4/servers.json:ro + - ./volumes/pgadmin/setup-password.py:/pgadmin4/setup-password.py:ro + networks: + - sensenet + depends_on: + - postgres + + # ────────────────────────────────────────────────────────── + # SnAuth – identity / JWT server + # DockerHub: sensenetcsp/sn-auth:preview + # HTTPS elérhető a hoston: https://localhost:44311 + # ────────────────────────────────────────────────────────── + snauth: + image: sensenetcsp/sn-auth:preview + container_name: sensenet-snis + environment: + ASPNETCORE_URLS: "https://+:443;http://+:80" + ASPNETCORE_ENVIRONMENT: "Development" + + # ── Sensenet repository (container-to-container) ───── + Sensenet__Repository__Url: "https://localhost:44362" + Sensenet__Repository__InnerUrl: "http://sensenet-snapp" + 
Sensenet__Repository__Authentication__ApiKey: "pr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Ted" + + # ── JWT beállítások ─────────────────────────────────── + JwtSettings__Issuer: "https://localhost:44311" + JwtSettings__Audience: "sensenet" + JwtSettings__SecretKey: "pr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Ted" + JwtSettings__AuthTokenExpiryMinutes: "300" + JwtSettings__MultiFactorAuthExpiryMinutes: "300" + JwtSettings__TokenExpiryMinutes: "300" + JwtSettings__RefreshTokenExpiryDays: "15" + PasswordRecovery__TokenExpiryMinutes: "60" + Registration__IsEnabled: "false" + Recaptcha__SiteKey: "" + Recaptcha__SecretKey: "" + + # ── Alkalmazás / CORS ───────────────────────────────── + Application__Url: "https://localhost:44311" + Application__AllowedHosts__0: "https://adminui.test.sensenet.com" + Application__AllowedHosts__1: "https://localhost:44362" + Application__AllowedHosts__2: "http://sensenet-snapp" + + # ── Dev cert (mount-olt) ────────────────────────────── + Kestrel__Certificates__Default__Path: "/root/.aspnet/https/snapp.pfx" + Kestrel__Certificates__Default__Password: "SuP3rS3CuR3P4sSw0Rd" + + ports: + - "44311:443" + volumes: + - ./volumes/certificates:/root/.aspnet/https:ro + networks: + - sensenet + depends_on: + snapp: + condition: service_started + + # ────────────────────────────────────────────────────────── + # DB Reset – csak --profile reset esetén fut + # Törli és újra létrehozza a sensenet-sndb adatbázist, + # valamint a Lucene indexet. + # ────────────────────────────────────────────────────────── + db-reset: + image: postgres:16 + container_name: sensenet-db-reset + profiles: ["reset"] + environment: + PGPASSWORD: "SuP3rS3CuR3P4sSw0Rd" + entrypoint: ["/bin/bash", "-c", "echo '⏳ Terminating active connections...' 
&& psql -h sensenet-postgres -U postgres -d postgres -c \"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = 'sensenet-sndb' AND pid <> pg_backend_pid();\" && echo '🗑️ Dropping database sensenet-sndb...' && dropdb -h sensenet-postgres -U postgres --if-exists sensenet-sndb && echo '✅ Database dropped.' && echo '🆕 Creating empty database sensenet-sndb...' && createdb -h sensenet-postgres -U postgres sensenet-sndb && echo '✅ Database created.' && echo '🗑️ Clearing Lucene index...' && rm -rf /app-data/LocalIndex/* && echo '✅ Index cleared. Fresh install will run on next snapp start.'"] + volumes: + - dotnet-data:/app-data + networks: + - sensenet + depends_on: + postgres: + condition: service_healthy + + # ────────────────────────────────────────────────────────── + # Sensenet API – lokális forrásból build-elt image + # Build context: ../src (a Dockerfile innen dolgozik) + # HTTPS elérhető a hoston: https://localhost:44362 + # ────────────────────────────────────────────────────────── + snapp: + build: + context: ../src + dockerfile: WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Dockerfile + pull_policy: build + container_name: sensenet-snapp + environment: + ASPNETCORE_URLS: "https://+:443;http://+:80" + ASPNETCORE_ENVIRONMENT: "Development" + + # ── Adatbázis (postgres container) ─────────────────────── + ConnectionStrings__SensenetRepository: "Host=sensenet-postgres;Database=sensenet-sndb;Username=postgres;Password=SuP3rS3CuR3P4sSw0Rd;Maximum Pool Size=200;Connection Idle Lifetime=60;Connection Pruning Interval=10" + + # ── SNAuth beállítások ──────────────────────────────── + sensenet__authentication__authServerType: "SNAuth" + sensenet__authentication__authority: "https://localhost:44311" + sensenet__authentication__metadatahost: "http://sensenet-snis" + sensenet__authentication__repositoryUrl: "https://localhost:44362" + sensenet__authentication__AddJwtCookie: "false" + sensenet__identityManagement__UserProfilesEnabled: "false" + 
sensenet__repository__Authentication__ApiKey: "pr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Ted" + + # ── Egyéb ───────────────────────────────────────────── + sensenet__apikeys__healthcheckeruser: "supaS3CUp4ss" + sensenet__install__postgres__EnableFirstInstallDB: "true" + sensenet__indexing__IndexDirectoryPath: "/app/App_Data/LocalIndex" + sensenet__Retrier__Count: "1" + + # ── Dev cert (mount-olt) ────────────────────────────── + Kestrel__Certificates__Default__Path: "/root/.aspnet/https/snapp.pfx" + Kestrel__Certificates__Default__Password: "SuP3rS3CuR3P4sSw0Rd" + + ports: + - "44362:443" + volumes: + - ./volumes/certificates:/root/.aspnet/https:ro + - dotnet-data:/app/App_Data + networks: + - sensenet + depends_on: + postgres: + condition: service_healthy + + # ────────────────────────────────────────────────────────── + # API Key helper – kiírja az admin API key-t a konzolra + # Használat: + # docker compose -f docker-compose.postgres.yml run --rm apikey + # ────────────────────────────────────────────────────────── + apikey: + image: postgres:16 + container_name: sensenet-apikey + profiles: ["tools"] + environment: + PGPASSWORD: "SuP3rS3CuR3P4sSw0Rd" + entrypoint: ["/bin/bash", "-c"] + command: + - | + KEY=$$(psql -h sensenet-postgres -U postgres -d sensenet-sndb -t -A -c \ + "SELECT \"Value\" FROM \"AccessTokens\" WHERE \"UserId\" = 1 AND \"ExpirationDate\" > NOW() ORDER BY \"CreationDate\" DESC LIMIT 1;" 2>/dev/null) + if [ -n "$$KEY" ]; then + echo "" + echo "══════════════════════════════════════════════════" + echo " 🔑 Admin API Key (PostgreSQL)" + echo "══════════════════════════════════════════════════" + echo " $$KEY" + echo "══════════════════════════════════════════════════" + echo "" + else + echo "❌ No API key found. Is sensenet running?" 
+ exit 1 + fi + networks: + - sensenet + depends_on: + postgres: + condition: service_healthy + +# ── Named volumes ────────────────────────────────────────────── +volumes: + postgres-data: + driver: local + dotnet-data: + driver: local + +# ── Hálózat ──────────────────────────────────────────────────── +networks: + sensenet: + driver: bridge diff --git a/deployment/readme.md b/deployment/readme.md index b5aa3ea64..56ea02057 100644 --- a/deployment/readme.md +++ b/deployment/readme.md @@ -119,4 +119,269 @@ This switch can be used to see what processes would be executed but without actu ### Verbose -The output is reduced by default to decrease the amount of install information. With the `-Verbose` switch all the additional technical information will be shown. For example the actual Docker command is shown that can be useful if you need to customize the installation. \ No newline at end of file +The output is reduced by default to decrease the amount of install information. With the `-Verbose` switch all the additional technical information will be shown. For example the actual Docker command is shown that can be useful if you need to customize the installation. + +--- + +## Docker Compose Setup (Linux / macOS) + +In addition to the PowerShell installer above, there are two **docker-compose** files for running sensenet locally with either **PostgreSQL** or **MSSQL**. These are self-contained and don't require the PowerShell scripts. 
+ +### Prerequisites + +- Docker & Docker Compose v2+ +- A dev certificate at `./volumes/certificates/snapp.pfx` (generated once): + +```bash +mkdir -p ./volumes/certificates +dotnet dev-certs https -ep ./volumes/certificates/snapp.pfx -p SuP3rS3CuR3P4sSw0Rd +dotnet dev-certs https --trust # Linux: may need manual trust +``` + +--- + +### PostgreSQL Stack + +**File:** `docker-compose.postgres.yml` + +| Service | Description | Host Port | +|---------|-------------|-----------| +| `postgres` | PostgreSQL 16 | `localhost:5532` | +| `pgadmin` | pgAdmin 4 web UI | `http://localhost:5433` | +| `snauth` | SnAuth identity / JWT server | `https://localhost:44311` | +| `snapp` | sensenet API (built from local source) | `https://localhost:44362` | + +#### Start + +```bash +docker compose -f docker-compose.postgres.yml up -d +``` + +The first start builds the `snapp` image from `../src` and runs the sensenet installer automatically (schema + initial content). This takes 1–3 minutes. + +#### Stop + +```bash +docker compose -f docker-compose.postgres.yml down +``` + +#### Get Admin API Key + +```bash +docker compose -f docker-compose.postgres.yml run --rm apikey +``` + +This starts a one-shot container that queries the database and prints the current admin API key: + +``` +══════════════════════════════════════════════════ + 🔑 Admin API Key (PostgreSQL) +══════════════════════════════════════════════════ + NJ1AFRTz8L1FbsJ0qyq1hToTfUiPYFh65CVIcd5kb4L... +══════════════════════════════════════════════════ +``` + +> **Note:** The admin API key is regenerated on every `snapp` restart. 
+ +--- + +### MSSQL Stack + +**File:** `docker-compose.mssql.yml` + +| Service | Description | Host Port | +|---------|-------------|-----------| +| `mssql` | SQL Server 2022 Express | `localhost:9999` | +| `mssql-init` | Creates the `sensenet-sndb` database (runs once) | — | +| `snauth` | SnAuth identity / JWT server | `https://localhost:44311` | +| `snapp` | sensenet API (built from local source) | `https://localhost:44362` | + +#### Start + +```bash +docker compose -f docker-compose.mssql.yml up -d +``` + +#### Stop + +```bash +docker compose -f docker-compose.mssql.yml down +``` + +#### Get Admin API Key + +```bash +docker compose -f docker-compose.mssql.yml run --rm apikey +``` + +--- + +### Full Reset (DB + Index) + +Both compose files include a `db-reset` service under the `reset` Docker Compose profile. Running it drops the database, recreates it empty, and clears the Lucene index so that the next `snapp` start performs a fresh install. + +#### Manual reset (PostgreSQL) + +```bash +# 1. Stop everything +docker compose -f docker-compose.postgres.yml down + +# 2. Run the reset profile (drops + recreates DB, clears index) +docker compose -f docker-compose.postgres.yml --profile reset up db-reset --abort-on-container-exit + +# 3. Stop reset containers +docker compose -f docker-compose.postgres.yml --profile reset down + +# 4. Start fresh +docker compose -f docker-compose.postgres.yml up -d +``` + +#### Manual reset (MSSQL) + +```bash +docker compose -f docker-compose.mssql.yml down +docker compose -f docker-compose.mssql.yml --profile reset up db-reset --abort-on-container-exit +docker compose -f docker-compose.mssql.yml --profile reset down +docker compose -f docker-compose.mssql.yml up -d +``` + +--- + +## Shell Scripts (Linux / macOS) + +All bash scripts are in the `deployment/scripts-linux/` directory. You can run +them from anywhere — they resolve paths relative to their own location. 
+ +```bash +cd deployment/scripts-linux +``` + +### Start + +| Script | Description | +|--------|-------------| +| `./start-postgres.sh` | Start the PostgreSQL stack | +| `./start-mssql.sh` | Start the MSSQL stack | + +```bash +./start-postgres.sh # start (rebuild if image doesn't exist) +./start-postgres.sh --build # force rebuild snapp image +./start-postgres.sh --no-build # skip building, use existing image +``` + +Both start scripts wait up to 3 minutes for sensenet to respond with HTTP 200, +then print all service URLs and how to get the API key. + +### Stop + +| Script | Description | +|--------|-------------| +| `./stop-postgres.sh` | Stop the PostgreSQL stack (data preserved) | +| `./stop-mssql.sh` | Stop the MSSQL stack (data preserved) | + +```bash +./stop-postgres.sh # stop containers, keep database data +./stop-postgres.sh --clean # stop + remove Docker volumes (⚠ deletes DB!) +``` + +### Reset (full reinstall) + +| Script | Description | +|--------|-------------| +| `./reset-postgres.sh` | Stop → drop DB → clear index → rebuild → start | +| `./reset-mssql.sh` | Stop → drop DB → clear index → rebuild → start | + +```bash +./reset-postgres.sh # full reset + rebuild snapp image +./reset-postgres.sh --no-build # reset without rebuilding the image +``` + +The reset scripts perform these steps: + +1. **Stop** all containers (including the `reset` profile) +2. **Drop & recreate** the database via the `db-reset` container +3. **Clear** the Lucene index (`App_Data/LocalIndex`) +4. **Rebuild** the `snapp` Docker image from source (skip with `--no-build`) +5. **Start** the full stack +6. **Wait** up to 3 minutes for sensenet to respond with HTTP 200 on `/odata.svc/Root` + +### API Key + +The admin API key is **regenerated on every `snapp` restart**. 
Use these scripts +to read the current key from the database: + +| Script | Description | +|--------|-------------| +| `./apikey-postgres.sh` | Read the admin API key from PostgreSQL | +| `./apikey-mssql.sh` | Read the admin API key from MSSQL | + +```bash +./apikey-postgres.sh # print the key +./apikey-postgres.sh --copy # print + copy to clipboard +./apikey-postgres.sh --bench # print + update SnBenchmark appsettings.json +``` + +The `--bench` flag automatically writes the key into +`tools/SnBenchmark/appsettings.json` so you can run the benchmark tool right +away without manual copy-paste. + +> You can also use the docker-compose one-shot container: +> `docker compose -f docker-compose.postgres.yml run --rm apikey` + +### Quick Reference + +```bash +cd deployment/scripts-linux + +# ── PostgreSQL ────────────────────────────────── +./start-postgres.sh # start +./stop-postgres.sh # stop +./reset-postgres.sh # full reset +./apikey-postgres.sh # get API key +./apikey-postgres.sh --bench # get API key + update benchmark config + +# ── MSSQL ─────────────────────────────────────── +./start-mssql.sh # start +./stop-mssql.sh # stop +./reset-mssql.sh # full reset +./apikey-mssql.sh # get API key +./apikey-mssql.sh --bench # get API key + update benchmark config +``` + +--- + +## Default Credentials + +| Service | User | Password | +|---------|------|----------| +| sensenet admin UI | `admin` | `admin` | +| PostgreSQL | `postgres` | `SuP3rS3CuR3P4sSw0Rd` | +| pgAdmin | `admin@sensenet.com` | `admin` | +| MSSQL SA | `sa` | `SuP3rS3CuR3P4sSw0Rd` | + +--- + +## File Overview + +``` +deployment/ +├── docker-compose.postgres.yml PostgreSQL stack (postgres, pgadmin, snauth, snapp) +├── docker-compose.mssql.yml MSSQL stack (mssql, snauth, snapp) +├── install-sensenet.ps1 Legacy PowerShell installer +├── readme.md This file +├── App_Data/ Mounted into snapp (Lucene index, logs) +├── scripts-linux/ Bash scripts for Linux / macOS +│ ├── start-postgres.sh Start the PostgreSQL stack 
+│ ├── stop-postgres.sh Stop the PostgreSQL stack +│ ├── reset-postgres.sh Full reset (drop DB + clear index + restart) +│ ├── apikey-postgres.sh Read admin API key from PostgreSQL +│ ├── start-mssql.sh Start the MSSQL stack +│ ├── stop-mssql.sh Stop the MSSQL stack +│ ├── reset-mssql.sh Full reset (drop DB + clear index + restart) +│ └── apikey-mssql.sh Read admin API key from MSSQL +├── scripts/ PowerShell helper scripts +├── volumes/ +│ ├── certificates/ Dev TLS certificate (snapp.pfx) +│ └── pgadmin/ pgAdmin server config +└── ... +``` \ No newline at end of file diff --git a/deployment/scripts-linux/apikey-mssql.sh b/deployment/scripts-linux/apikey-mssql.sh new file mode 100755 index 000000000..3a8ba9e5b --- /dev/null +++ b/deployment/scripts-linux/apikey-mssql.sh @@ -0,0 +1,94 @@ +#!/usr/bin/env bash +# ============================================================ +# apikey-mssql.sh +# Reads the current admin API key from the MSSQL database +# and prints it to the console. +# +# Usage: +# ./apikey-mssql.sh # print API key +# ./apikey-mssql.sh --copy # also copy to clipboard +# ./apikey-mssql.sh --bench # also update SnBenchmark appsettings.json +# ./apikey-mssql.sh --help +# ============================================================ + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +COPY=false +BENCH=false + +# ── Parse args ─────────────────────────────────────────────── +for arg in "$@"; do + case "$arg" in + --copy) COPY=true ;; + --bench) BENCH=true ;; + --help|-h) + echo "Usage: $0 [--copy] [--bench] [--help]" + echo "" + echo "Reads the admin API key from the MSSQL database." 
+ echo "" + echo "Options:" + echo " --copy Copy the key to clipboard (requires xclip or xsel)" + echo " --bench Update tools/SnBenchmark/appsettings.json with the key" + echo " --help Show this help" + exit 0 + ;; + esac +done + +# ── Colors ─────────────────────────────────────────────────── +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +BOLD='\033[1m' +NC='\033[0m' + +# ── Read key ───────────────────────────────────────────────── +KEY=$(docker exec sensenet-snsql /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P 'SuP3rS3CuR3P4sSw0Rd' -d sensenet-sndb -No -h -1 -W -Q \ + "SET NOCOUNT ON; SELECT TOP 1 Value FROM AccessTokens WHERE UserId = 1 AND ExpirationDate > GETDATE() ORDER BY CreationDate DESC" \ + 2>/dev/null | head -1 | tr -d '[:space:]') + +if [ -z "$KEY" ]; then + echo -e "${RED}✗ No API key found.${NC}" + echo -e "${YELLOW} Is the MSSQL stack running? Try: ./start-mssql.sh${NC}" + exit 1 +fi + +echo "" +echo -e "${BOLD}══════════════════════════════════════════════════${NC}" +echo -e "${BOLD} 🔑 Admin API Key (MSSQL)${NC}" +echo -e "${BOLD}══════════════════════════════════════════════════${NC}" +echo -e " ${GREEN}${KEY}${NC}" +echo -e "${BOLD}══════════════════════════════════════════════════${NC}" +echo "" + +# ── Copy to clipboard ─────────────────────────────────────── +if [ "$COPY" = true ]; then + if command -v xclip &>/dev/null; then + echo -n "$KEY" | xclip -selection clipboard + echo -e " ${GREEN}✓ Copied to clipboard${NC}" + elif command -v xsel &>/dev/null; then + echo -n "$KEY" | xsel --clipboard --input + echo -e " ${GREEN}✓ Copied to clipboard${NC}" + elif command -v pbcopy &>/dev/null; then + echo -n "$KEY" | pbcopy + echo -e " ${GREEN}✓ Copied to clipboard${NC}" + else + echo -e " ${YELLOW}⚠ No clipboard tool found (install xclip or xsel)${NC}" + fi +fi + +# ── Update SnBenchmark ────────────────────────────────────── +if [ "$BENCH" = true ]; then + 
BENCH_SETTINGS="$SCRIPT_DIR/../../tools/SnBenchmark/appsettings.json" + if [ -f "$BENCH_SETTINGS" ]; then + sed -i "s|\"ApiKey\": \".*\"|\"ApiKey\": \"$KEY\"|" "$BENCH_SETTINGS" + echo -e " ${GREEN}✓ Updated tools/SnBenchmark/appsettings.json${NC}" + else + echo -e " ${YELLOW}⚠ SnBenchmark appsettings.json not found at $BENCH_SETTINGS${NC}" + fi +fi + +echo "" diff --git a/deployment/scripts-linux/apikey-postgres.sh b/deployment/scripts-linux/apikey-postgres.sh new file mode 100755 index 000000000..c5e9bb914 --- /dev/null +++ b/deployment/scripts-linux/apikey-postgres.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash +# ============================================================ +# apikey-postgres.sh +# Reads the current admin API key from the PostgreSQL database +# and prints it to the console. +# +# Usage: +# ./apikey-postgres.sh # print API key +# ./apikey-postgres.sh --copy # also copy to clipboard +# ./apikey-postgres.sh --bench # also update SnBenchmark appsettings.json +# ./apikey-postgres.sh --help +# ============================================================ + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +COPY=false +BENCH=false + +# ── Parse args ─────────────────────────────────────────────── +for arg in "$@"; do + case "$arg" in + --copy) COPY=true ;; + --bench) BENCH=true ;; + --help|-h) + echo "Usage: $0 [--copy] [--bench] [--help]" + echo "" + echo "Reads the admin API key from the PostgreSQL database." 
+ echo "" + echo "Options:" + echo " --copy Copy the key to clipboard (requires xclip or xsel)" + echo " --bench Update tools/SnBenchmark/appsettings.json with the key" + echo " --help Show this help" + exit 0 + ;; + esac +done + +# ── Colors ─────────────────────────────────────────────────── +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +BOLD='\033[1m' +NC='\033[0m' + +# ── Read key ───────────────────────────────────────────────── +KEY=$(docker exec sensenet-postgres psql -U postgres -d sensenet-sndb -t -A -c \ + "SELECT \"Value\" FROM \"AccessTokens\" WHERE \"UserId\" = 1 AND \"ExpirationDate\" > NOW() ORDER BY \"CreationDate\" DESC LIMIT 1;" 2>/dev/null | tr -d '[:space:]') + +if [ -z "$KEY" ]; then + echo -e "${RED}✗ No API key found.${NC}" + echo -e "${YELLOW} Is the PostgreSQL stack running? Try: ./start-postgres.sh${NC}" + exit 1 +fi + +echo "" +echo -e "${BOLD}══════════════════════════════════════════════════${NC}" +echo -e "${BOLD} 🔑 Admin API Key (PostgreSQL)${NC}" +echo -e "${BOLD}══════════════════════════════════════════════════${NC}" +echo -e " ${GREEN}${KEY}${NC}" +echo -e "${BOLD}══════════════════════════════════════════════════${NC}" +echo "" + +# ── Copy to clipboard ─────────────────────────────────────── +if [ "$COPY" = true ]; then + if command -v xclip &>/dev/null; then + echo -n "$KEY" | xclip -selection clipboard + echo -e " ${GREEN}✓ Copied to clipboard${NC}" + elif command -v xsel &>/dev/null; then + echo -n "$KEY" | xsel --clipboard --input + echo -e " ${GREEN}✓ Copied to clipboard${NC}" + elif command -v pbcopy &>/dev/null; then + echo -n "$KEY" | pbcopy + echo -e " ${GREEN}✓ Copied to clipboard${NC}" + else + echo -e " ${YELLOW}⚠ No clipboard tool found (install xclip or xsel)${NC}" + fi +fi + +# ── Update SnBenchmark ────────────────────────────────────── +if [ "$BENCH" = true ]; then + BENCH_SETTINGS="$SCRIPT_DIR/../../tools/SnBenchmark/appsettings.json" + if [ -f "$BENCH_SETTINGS" ]; then + sed -i 
"s|\"ApiKey\": \".*\"|\"ApiKey\": \"$KEY\"|" "$BENCH_SETTINGS" + echo -e " ${GREEN}✓ Updated tools/SnBenchmark/appsettings.json${NC}" + else + echo -e " ${YELLOW}⚠ SnBenchmark appsettings.json not found at $BENCH_SETTINGS${NC}" + fi +fi + +echo "" diff --git a/deployment/scripts-linux/reset-mssql.sh b/deployment/scripts-linux/reset-mssql.sh new file mode 100755 index 000000000..57cc09916 --- /dev/null +++ b/deployment/scripts-linux/reset-mssql.sh @@ -0,0 +1,131 @@ +#!/usr/bin/env bash +# ============================================================ +# reset-mssql.sh +# Stops, resets, and restarts the sensenet MSSQL stack. +# +# Usage: +# ./reset-mssql.sh # full reset + rebuild +# ./reset-mssql.sh --no-build # full reset, no rebuild +# ./reset-mssql.sh --help +# ============================================================ + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +DEPLOY_DIR="$SCRIPT_DIR/.." +COMPOSE_FILE="$DEPLOY_DIR/docker-compose.mssql.yml" +APP_DATA="$DEPLOY_DIR/App_Data" +REBUILD=true + +# ── Parse args ─────────────────────────────────────────────── +for arg in "$@"; do + case "$arg" in + --no-build) REBUILD=false ;; + --help|-h) + echo "Usage: $0 [--no-build] [--help]" + echo "" + echo "Stops all containers, drops the MSSQL database," + echo "clears the Lucene index, rebuilds the snapp image," + echo "and starts everything fresh." 
+ echo "" + echo "Options:" + echo " --no-build Skip rebuilding the snapp Docker image" + echo " --help Show this help" + exit 0 + ;; + esac +done + +# ── Colors ─────────────────────────────────────────────────── +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +BOLD='\033[1m' +NC='\033[0m' + +step() { echo -e "\n${CYAN}${BOLD}▶ $1${NC}"; } +ok() { echo -e " ${GREEN}✓ $1${NC}"; } +warn() { echo -e " ${YELLOW}⚠ $1${NC}"; } +fail() { echo -e " ${RED}✗ $1${NC}"; exit 1; } + +echo -e "${BOLD}" +echo "╔═══════════════════════════════════════════════╗" +echo "║ sensenet MSSQL — Full Reset & Restart ║" +echo "╚═══════════════════════════════════════════════╝" +echo -e "${NC}" + +# ── 1. Stop all containers ────────────────────────────────── +step "Stopping all containers..." +docker compose -f "$COMPOSE_FILE" --profile reset down --remove-orphans 2>/dev/null || true +ok "Containers stopped" + +# ── 2. Drop the database via reset profile ────────────────── +step "Starting MSSQL and running DB reset..." +docker compose -f "$COMPOSE_FILE" --profile reset up db-reset --abort-on-container-exit 2>&1 +RESET_EXIT=$? +if [ $RESET_EXIT -ne 0 ]; then + warn "Reset profile exited with code $RESET_EXIT (may be OK if DB didn't exist)" +fi + +# Also make sure index is cleared on the host side +if [ -d "$APP_DATA/LocalIndex" ]; then + rm -rf "$APP_DATA/LocalIndex"/* + ok "Lucene index cleared (host)" +else + warn "No LocalIndex directory found at $APP_DATA/LocalIndex" +fi + +# ── 3. Stop the reset containers ──────────────────────────── +step "Stopping reset containers..." +docker compose -f "$COMPOSE_FILE" --profile reset down --remove-orphans 2>/dev/null || true +ok "Reset containers stopped" + +# ── 4. Rebuild snapp image ────────────────────────────────── +if [ "$REBUILD" = true ]; then + step "Rebuilding snapp image..." + docker compose -f "$COMPOSE_FILE" build snapp + ok "Image rebuilt" +else + warn "Skipping rebuild (--no-build)" +fi + +# ── 5. 
Start the full stack ───────────────────────────────── +step "Starting the full stack..." +docker compose -f "$COMPOSE_FILE" up -d +ok "All containers started" + +# ── 6. Wait for sensenet to become ready ──────────────────── +step "Waiting for sensenet to become ready..." +echo -e " ${CYAN}(this may take 1–3 minutes for fresh install)${NC}" + +MAX_WAIT=180 +ELAPSED=0 +while [ $ELAPSED -lt $MAX_WAIT ]; do + HTTP_CODE=$(curl -sk -o /dev/null -w "%{http_code}" https://localhost:44362/odata.svc/Root 2>/dev/null || echo "000") + if [ "$HTTP_CODE" = "200" ]; then + ok "sensenet is ready! (HTTP 200 after ${ELAPSED}s)" + break + fi + printf "\r ⏳ Waiting... %3ds / %ds (last HTTP: %s)" "$ELAPSED" "$MAX_WAIT" "$HTTP_CODE" + sleep 3 + ELAPSED=$((ELAPSED + 3)) +done + +if [ $ELAPSED -ge $MAX_WAIT ]; then + echo "" + warn "Timed out after ${MAX_WAIT}s. Check logs: docker compose -f docker-compose.mssql.yml logs -f snapp" +fi + +# ── Done ──────────────────────────────────────────────────── +echo "" +echo -e "${GREEN}${BOLD}╔═══════════════════════════════════╗${NC}" +echo -e "${GREEN}${BOLD}║ ✅ Reset complete! ║${NC}" +echo -e "${GREEN}${BOLD}╚═══════════════════════════════════╝${NC}" +echo "" +echo -e " ${CYAN}Repository:${NC} https://localhost:44362" +echo -e " ${CYAN}SnAuth:${NC} https://localhost:44311" +echo -e " ${CYAN}MSSQL:${NC} localhost:9999" +echo "" +echo -e " ${CYAN}Logs:${NC} docker compose -f docker-compose.mssql.yml logs -f snapp" +echo "" diff --git a/deployment/scripts-linux/reset-postgres.sh b/deployment/scripts-linux/reset-postgres.sh new file mode 100755 index 000000000..dc37e2b6e --- /dev/null +++ b/deployment/scripts-linux/reset-postgres.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env bash +# ============================================================ +# reset-postgres.sh +# Stops, resets, and restarts the sensenet PostgreSQL stack. 
+# +# Usage: +# ./reset-postgres.sh # full reset + rebuild +# ./reset-postgres.sh --no-build # full reset, no rebuild +# ./reset-postgres.sh --help +# ============================================================ + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +DEPLOY_DIR="$SCRIPT_DIR/.." +COMPOSE_FILE="$DEPLOY_DIR/docker-compose.postgres.yml" +APP_DATA="$DEPLOY_DIR/App_Data" +REBUILD=true + +# ── Parse args ─────────────────────────────────────────────── +for arg in "$@"; do + case "$arg" in + --no-build) REBUILD=false ;; + --help|-h) + echo "Usage: $0 [--no-build] [--help]" + echo "" + echo "Stops all containers, drops the PostgreSQL database," + echo "clears the Lucene index, rebuilds the snapp image," + echo "and starts everything fresh." + echo "" + echo "Options:" + echo " --no-build Skip rebuilding the snapp Docker image" + echo " --help Show this help" + exit 0 + ;; + esac +done + +# ── Colors ─────────────────────────────────────────────────── +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +BOLD='\033[1m' +NC='\033[0m' + +step() { echo -e "\n${CYAN}${BOLD}▶ $1${NC}"; } +ok() { echo -e " ${GREEN}✓ $1${NC}"; } +warn() { echo -e " ${YELLOW}⚠ $1${NC}"; } +fail() { echo -e " ${RED}✗ $1${NC}"; exit 1; } + +echo -e "${BOLD}" +echo "╔═══════════════════════════════════════════════╗" +echo "║ sensenet PostgreSQL — Full Reset & Restart ║" +echo "╚═══════════════════════════════════════════════╝" +echo -e "${NC}" + +# ── 1. Stop all containers ────────────────────────────────── +step "Stopping all containers..." +docker compose -f "$COMPOSE_FILE" --profile reset down --remove-orphans 2>/dev/null || true +ok "Containers stopped" + +# ── 2. Drop the database via reset profile ────────────────── +step "Starting PostgreSQL and running DB reset..." +docker compose -f "$COMPOSE_FILE" --profile reset up db-reset --abort-on-container-exit 2>&1 +RESET_EXIT=$? 
+if [ $RESET_EXIT -ne 0 ]; then + warn "Reset profile exited with code $RESET_EXIT (may be OK if DB didn't exist)" +fi + +# Also make sure index is cleared on the host side +if [ -d "$APP_DATA/LocalIndex" ]; then + rm -rf "$APP_DATA/LocalIndex"/* + ok "Lucene index cleared (host)" +else + warn "No LocalIndex directory found at $APP_DATA/LocalIndex" +fi + +# ── 3. Stop the reset containers ──────────────────────────── +step "Stopping reset containers..." +docker compose -f "$COMPOSE_FILE" --profile reset down --remove-orphans 2>/dev/null || true +ok "Reset containers stopped" + +# ── 4. Rebuild snapp image ────────────────────────────────── +if [ "$REBUILD" = true ]; then + step "Rebuilding snapp image..." + docker compose -f "$COMPOSE_FILE" build snapp + ok "Image rebuilt" +else + warn "Skipping rebuild (--no-build)" +fi + +# ── 5. Start the full stack ───────────────────────────────── +step "Starting the full stack..." +docker compose -f "$COMPOSE_FILE" up -d +ok "All containers started" + +# ── 6. Wait for sensenet to become ready ──────────────────── +step "Waiting for sensenet to become ready..." +echo -e " ${CYAN}(this may take 1–3 minutes for fresh install)${NC}" + +MAX_WAIT=180 +ELAPSED=0 +while [ $ELAPSED -lt $MAX_WAIT ]; do + HTTP_CODE=$(curl -sk -o /dev/null -w "%{http_code}" https://localhost:44362/odata.svc/Root 2>/dev/null || echo "000") + if [ "$HTTP_CODE" = "200" ]; then + ok "sensenet is ready! (HTTP 200 after ${ELAPSED}s)" + break + fi + printf "\r ⏳ Waiting... %3ds / %ds (last HTTP: %s)" "$ELAPSED" "$MAX_WAIT" "$HTTP_CODE" + sleep 3 + ELAPSED=$((ELAPSED + 3)) +done + +if [ $ELAPSED -ge $MAX_WAIT ]; then + echo "" + warn "Timed out after ${MAX_WAIT}s. Check logs: docker compose -f docker-compose.postgres.yml logs -f snapp" +fi + +# ── Done ──────────────────────────────────────────────────── +echo "" +echo -e "${GREEN}${BOLD}╔═══════════════════════════════════╗${NC}" +echo -e "${GREEN}${BOLD}║ ✅ Reset complete! 
║${NC}" +echo -e "${GREEN}${BOLD}╚═══════════════════════════════════╝${NC}" +echo "" +echo -e " ${CYAN}Repository:${NC} https://localhost:44362" +echo -e " ${CYAN}SnAuth:${NC} https://localhost:44311" +echo -e " ${CYAN}pgAdmin:${NC} http://localhost:5433" +echo -e " ${CYAN}PostgreSQL:${NC} localhost:5532" +echo "" +echo -e " ${CYAN}Logs:${NC} docker compose -f docker-compose.postgres.yml logs -f snapp" +echo "" diff --git a/deployment/scripts-linux/start-mssql.sh b/deployment/scripts-linux/start-mssql.sh new file mode 100755 index 000000000..90f635095 --- /dev/null +++ b/deployment/scripts-linux/start-mssql.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash +# ============================================================ +# start-mssql.sh +# Starts the sensenet MSSQL stack. +# +# Usage: +# ./start-mssql.sh # start (rebuild if needed) +# ./start-mssql.sh --no-build # start without rebuild +# ./start-mssql.sh --build # force rebuild snapp image +# ./start-mssql.sh --help +# ============================================================ + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +DEPLOY_DIR="$SCRIPT_DIR/.." +COMPOSE_FILE="$DEPLOY_DIR/docker-compose.mssql.yml" +BUILD_ARG="" + +# ── Parse args ─────────────────────────────────────────────── +for arg in "$@"; do + case "$arg" in + --no-build) BUILD_ARG="--no-build" ;; + --build) BUILD_ARG="--build" ;; + --help|-h) + echo "Usage: $0 [--build] [--no-build] [--help]" + echo "" + echo "Starts the sensenet MSSQL stack." 
+ echo "" + echo "Options:" + echo " --build Force rebuild the snapp Docker image" + echo " --no-build Skip building, use existing image" + echo " --help Show this help" + exit 0 + ;; + esac +done + +# ── Colors ─────────────────────────────────────────────────── +GREEN='\033[0;32m' +CYAN='\033[0;36m' +BOLD='\033[1m' +NC='\033[0m' + +step() { echo -e "\n${CYAN}${BOLD}▶ $1${NC}"; } +ok() { echo -e " ${GREEN}✓ $1${NC}"; } + +echo -e "${BOLD}" +echo "╔═══════════════════════════════════════════════╗" +echo "║ sensenet MSSQL — Start ║" +echo "╚═══════════════════════════════════════════════╝" +echo -e "${NC}" + +# ── Start ──────────────────────────────────────────────────── +step "Starting the MSSQL stack..." +docker compose -f "$COMPOSE_FILE" up -d $BUILD_ARG +ok "All containers started" + +# ── Wait for sensenet ──────────────────────────────────────── +step "Waiting for sensenet to become ready..." +echo -e " ${CYAN}(may take 1–3 minutes on fresh install)${NC}" + +MAX_WAIT=180 +ELAPSED=0 +while [ $ELAPSED -lt $MAX_WAIT ]; do + HTTP_CODE=$(curl -sk -o /dev/null -w "%{http_code}" https://localhost:44362/odata.svc/Root 2>/dev/null || echo "000") + if [ "$HTTP_CODE" = "200" ]; then + ok "sensenet is ready! (HTTP 200 after ${ELAPSED}s)" + break + fi + printf "\r ⏳ Waiting... %3ds / %ds (last HTTP: %s)" "$ELAPSED" "$MAX_WAIT" "$HTTP_CODE" + sleep 3 + ELAPSED=$((ELAPSED + 3)) +done + +if [ $ELAPSED -ge $MAX_WAIT ]; then + echo "" + echo -e " ${CYAN}⚠ Timed out. Check logs: docker compose -f docker-compose.mssql.yml logs -f snapp${NC}" +fi + +# ── Done ───────────────────────────────────────────────────── +echo "" +echo -e "${GREEN}${BOLD}╔═══════════════════════════════════╗${NC}" +echo -e "${GREEN}${BOLD}║ ✅ Stack is running! 
║${NC}" +echo -e "${GREEN}${BOLD}╚═══════════════════════════════════╝${NC}" +echo "" +echo -e " ${CYAN}Repository:${NC} https://localhost:44362" +echo -e " ${CYAN}SnAuth:${NC} https://localhost:44311" +echo -e " ${CYAN}MSSQL:${NC} localhost:9999" +echo "" +echo -e " ${CYAN}API Key:${NC} docker compose -f docker-compose.mssql.yml run --rm apikey" +echo -e " ${CYAN}Logs:${NC} docker compose -f docker-compose.mssql.yml logs -f snapp" +echo "" diff --git a/deployment/scripts-linux/start-postgres.sh b/deployment/scripts-linux/start-postgres.sh new file mode 100755 index 000000000..e6c4bf8b4 --- /dev/null +++ b/deployment/scripts-linux/start-postgres.sh @@ -0,0 +1,94 @@ +#!/usr/bin/env bash +# ============================================================ +# start-postgres.sh +# Starts the sensenet PostgreSQL stack. +# +# Usage: +# ./start-postgres.sh # start (rebuild if needed) +# ./start-postgres.sh --no-build # start without rebuild +# ./start-postgres.sh --build # force rebuild snapp image +# ./start-postgres.sh --help +# ============================================================ + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +DEPLOY_DIR="$SCRIPT_DIR/.." +COMPOSE_FILE="$DEPLOY_DIR/docker-compose.postgres.yml" +BUILD_ARG="" + +# ── Parse args ─────────────────────────────────────────────── +for arg in "$@"; do + case "$arg" in + --no-build) BUILD_ARG="--no-build" ;; + --build) BUILD_ARG="--build" ;; + --help|-h) + echo "Usage: $0 [--build] [--no-build] [--help]" + echo "" + echo "Starts the sensenet PostgreSQL stack." 
+ echo "" + echo "Options:" + echo " --build Force rebuild the snapp Docker image" + echo " --no-build Skip building, use existing image" + echo " --help Show this help" + exit 0 + ;; + esac +done + +# ── Colors ─────────────────────────────────────────────────── +GREEN='\033[0;32m' +CYAN='\033[0;36m' +BOLD='\033[1m' +NC='\033[0m' + +step() { echo -e "\n${CYAN}${BOLD}▶ $1${NC}"; } +ok() { echo -e " ${GREEN}✓ $1${NC}"; } + +echo -e "${BOLD}" +echo "╔═══════════════════════════════════════════════╗" +echo "║ sensenet PostgreSQL — Start ║" +echo "╚═══════════════════════════════════════════════╝" +echo -e "${NC}" + +# ── Start ──────────────────────────────────────────────────── +step "Starting the PostgreSQL stack..." +docker compose -f "$COMPOSE_FILE" up -d $BUILD_ARG +ok "All containers started" + +# ── Wait for sensenet ──────────────────────────────────────── +step "Waiting for sensenet to become ready..." +echo -e " ${CYAN}(may take 1–3 minutes on fresh install)${NC}" + +MAX_WAIT=180 +ELAPSED=0 +while [ $ELAPSED -lt $MAX_WAIT ]; do + HTTP_CODE=$(curl -sk -o /dev/null -w "%{http_code}" https://localhost:44362/odata.svc/Root 2>/dev/null || echo "000") + if [ "$HTTP_CODE" = "200" ]; then + ok "sensenet is ready! (HTTP 200 after ${ELAPSED}s)" + break + fi + printf "\r ⏳ Waiting... %3ds / %ds (last HTTP: %s)" "$ELAPSED" "$MAX_WAIT" "$HTTP_CODE" + sleep 3 + ELAPSED=$((ELAPSED + 3)) +done + +if [ $ELAPSED -ge $MAX_WAIT ]; then + echo "" + echo -e " ${CYAN}⚠ Timed out. Check logs: docker compose -f docker-compose.postgres.yml logs -f snapp${NC}" +fi + +# ── Done ───────────────────────────────────────────────────── +echo "" +echo -e "${GREEN}${BOLD}╔═══════════════════════════════════╗${NC}" +echo -e "${GREEN}${BOLD}║ ✅ Stack is running! 
║${NC}" +echo -e "${GREEN}${BOLD}╚═══════════════════════════════════╝${NC}" +echo "" +echo -e " ${CYAN}Repository:${NC} https://localhost:44362" +echo -e " ${CYAN}SnAuth:${NC} https://localhost:44311" +echo -e " ${CYAN}pgAdmin:${NC} http://localhost:5433" +echo -e " ${CYAN}PostgreSQL:${NC} localhost:5532" +echo "" +echo -e " ${CYAN}API Key:${NC} docker compose -f docker-compose.postgres.yml run --rm apikey" +echo -e " ${CYAN}Logs:${NC} docker compose -f docker-compose.postgres.yml logs -f snapp" +echo "" diff --git a/deployment/scripts-linux/stop-mssql.sh b/deployment/scripts-linux/stop-mssql.sh new file mode 100755 index 000000000..08308db76 --- /dev/null +++ b/deployment/scripts-linux/stop-mssql.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +# ============================================================ +# stop-mssql.sh +# Stops the sensenet MSSQL stack. +# +# Usage: +# ./stop-mssql.sh # stop containers (keep data) +# ./stop-mssql.sh --clean # stop + remove volumes +# ./stop-mssql.sh --help +# ============================================================ + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +DEPLOY_DIR="$SCRIPT_DIR/.." +COMPOSE_FILE="$DEPLOY_DIR/docker-compose.mssql.yml" +CLEAN=false + +# ── Parse args ─────────────────────────────────────────────── +for arg in "$@"; do + case "$arg" in + --clean) CLEAN=true ;; + --help|-h) + echo "Usage: $0 [--clean] [--help]" + echo "" + echo "Stops the sensenet MSSQL stack." 
+ echo "" + echo "Options:" + echo " --clean Also remove Docker volumes (database data!)" + echo " --help Show this help" + exit 0 + ;; + esac +done + +# ── Colors ─────────────────────────────────────────────────── +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +BOLD='\033[1m' +NC='\033[0m' + +step() { echo -e "\n${CYAN}${BOLD}▶ $1${NC}"; } +ok() { echo -e " ${GREEN}✓ $1${NC}"; } +warn() { echo -e " ${YELLOW}⚠ $1${NC}"; } + +echo -e "${BOLD}" +echo "╔═══════════════════════════════════════════════╗" +echo "║ sensenet MSSQL — Stop ║" +echo "╚═══════════════════════════════════════════════╝" +echo -e "${NC}" + +# ── Stop ───────────────────────────────────────────────────── +if [ "$CLEAN" = true ]; then + step "Stopping containers and removing volumes..." + docker compose -f "$COMPOSE_FILE" --profile reset down --volumes --remove-orphans 2>/dev/null || true + ok "Containers stopped, volumes removed" + warn "Database data has been permanently deleted!" +else + step "Stopping containers (data preserved)..." + docker compose -f "$COMPOSE_FILE" --profile reset down --remove-orphans 2>/dev/null || true + ok "Containers stopped" +fi + +echo "" +echo -e " ${GREEN}Done.${NC} 👋" +echo "" diff --git a/deployment/scripts-linux/stop-postgres.sh b/deployment/scripts-linux/stop-postgres.sh new file mode 100755 index 000000000..3d9a11e76 --- /dev/null +++ b/deployment/scripts-linux/stop-postgres.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +# ============================================================ +# stop-postgres.sh +# Stops the sensenet PostgreSQL stack. +# +# Usage: +# ./stop-postgres.sh # stop containers (keep data) +# ./stop-postgres.sh --clean # stop + remove volumes +# ./stop-postgres.sh --help +# ============================================================ + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +DEPLOY_DIR="$SCRIPT_DIR/.." 
+COMPOSE_FILE="$DEPLOY_DIR/docker-compose.postgres.yml" +CLEAN=false + +# ── Parse args ─────────────────────────────────────────────── +for arg in "$@"; do + case "$arg" in + --clean) CLEAN=true ;; + --help|-h) + echo "Usage: $0 [--clean] [--help]" + echo "" + echo "Stops the sensenet PostgreSQL stack." + echo "" + echo "Options:" + echo " --clean Also remove Docker volumes (database data!)" + echo " --help Show this help" + exit 0 + ;; + esac +done + +# ── Colors ─────────────────────────────────────────────────── +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +BOLD='\033[1m' +NC='\033[0m' + +step() { echo -e "\n${CYAN}${BOLD}▶ $1${NC}"; } +ok() { echo -e " ${GREEN}✓ $1${NC}"; } +warn() { echo -e " ${YELLOW}⚠ $1${NC}"; } + +echo -e "${BOLD}" +echo "╔═══════════════════════════════════════════════╗" +echo "║ sensenet PostgreSQL — Stop ║" +echo "╚═══════════════════════════════════════════════╝" +echo -e "${NC}" + +# ── Stop ───────────────────────────────────────────────────── +if [ "$CLEAN" = true ]; then + step "Stopping containers and removing volumes..." + docker compose -f "$COMPOSE_FILE" --profile reset down --volumes --remove-orphans 2>/dev/null || true + ok "Containers stopped, volumes removed" + warn "Database data has been permanently deleted!" +else + step "Stopping containers (data preserved)..." 
+ docker compose -f "$COMPOSE_FILE" --profile reset down --remove-orphans 2>/dev/null || true + ok "Containers stopped" +fi + +echo "" +echo -e " ${GREEN}Done.${NC} 👋" +echo "" diff --git a/docs/notes/absztrakt-reteg-mssql-szivargasok-javitasa.md b/docs/notes/absztrakt-reteg-mssql-szivargasok-javitasa.md new file mode 100644 index 000000000..21c45ae7f --- /dev/null +++ b/docs/notes/absztrakt-reteg-mssql-szivargasok-javitasa.md @@ -0,0 +1,678 @@ +# 🔧 Absztrakt réteg MSSQL szivárgás javítása — Részletes terv + +## Összefoglaló + +Az `SnDataContext` és `RelationalDataProviderBase` osztályok **elvileg adatbázis-független absztrakt rétegek**, de mindkettő közvetlenül hivatkozik MSSQL-specifikus kódra. Ez megakadályozza, hogy bármilyen nem-MSSQL provider (PostgreSQL, MySQL, stb.) tisztán implementálható legyen. Ez a dokumentum részletesen leírja, **mit, hol és hogyan** kell javítani. + +--- + +## 1. A probléma pontos leírása + +### 1.1 `SnDataContext` — 3 MSSQL-szivárgás + +**Fájl**: `src/Common/Storage/Data/SnDataContext.cs` (226 sor) +**Projekt**: `SenseNet.Common` — a legalsó szintű közös csomag, minden más projekt hivatkozik rá + +| # | Sor | Probléma | Súlyosság | +|---|-----|----------|-----------| +| 1 | 4. sor | `using Microsoft.Data.SqlClient;` import | 🔴 Kritikus | +| 2 | 191-193 | `ShouldRetryOnError()` — `ex is SqlException` ellenőrzés | 🔴 Kritikus | +| 3 | — | `SenseNet.Common.csproj` — `Microsoft.Data.SqlClient` NuGet csomag függőség | 🔴 Kritikus | + +#### A jelenlegi hibás kód (`SnDataContext.cs`, 189-194. 
sor): + +```csharp +internal static bool ShouldRetryOnError(Exception ex) +{ + //TODO: generalize the expression by relying on error codes instead of hardcoded message texts + return (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) || + (ex is SqlException && ex.Message.Contains("A network-related or instance-specific error occurred")); +} +``` + +> ⚠️ A fejlesztők **tudatában vannak** a problémának — a `//TODO` komment 2018 óta ott van. + +#### Miért probléma? + +- A `SenseNet.Common` NuGet csomagot **minden** sensenet projekt hivatkozza +- Ez azt jelenti, hogy **minden projekt tranzitíven függ** a `Microsoft.Data.SqlClient`-től +- Egy PostgreSQL provider nem tudja elkerülni az MSSQL driver betöltését +- Ráadásul a `ShouldRetryOnError()` metódus `internal static` — **nem overridolható** + +### 1.2 `RelationalDataProviderBase` — 2 MSSQL-szivárgás + +**Fájl**: `src/Storage/Data/RelationalDataProviderBase.cs` (2759 sor) +**Projekt**: `SenseNet.Storage` + +| # | Sor | Probléma | Súlyosság | +|---|-----|----------|-----------| +| 1 | 5. sor | `using Microsoft.Data.SqlClient;` import | 🔴 Kritikus | +| 2 | 2565-2571 | `IsDatabaseReadyAsync()` — `catch (SqlException ex)` + error number `4060`/`233` | 🔴 Kritikus | +| 3 | 2751-2755 | `ShouldRetryOnError()` — `ex is SqlException` ellenőrzés (duplikáció!) | 🟡 Közepes | + +#### A jelenlegi hibás kód #1 (`IsDatabaseReadyAsync`, 2546-2578. 
sor): + +```csharp +public override async Task IsDatabaseReadyAsync(CancellationToken cancellationToken) +{ + const string schemaCheckSql = @" +SELECT CASE WHEN EXISTS ( + SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = N'Nodes' +) +THEN CAST(1 AS BIT) +ELSE CAST(0 AS BIT) END"; + + using var op = SnTrace.Database.StartOperation("RelationalDataProviderBase: IsDatabaseReady()"); + + using var ctx = CreateDataContext(cancellationToken); + bool result; + try + { + var dbResult = await ctx.ExecuteScalarAsync(schemaCheckSql).ConfigureAwait(false); + result = Convert.ToBoolean(dbResult); + } + catch (SqlException ex) // ← MSSQL-specifikus! + { + if (ex.Number is 4060 or 233) // ← MSSQL error kódok! + result = false; + else + throw; + } + op.Successful = true; + return result; +} +``` + +#### A jelenlegi hibás kód #2 (`ShouldRetryOnError`, 2751-2755. sor): + +```csharp +protected virtual bool ShouldRetryOnError(Exception ex) +{ + //TODO: generalize the expression by relying on error codes instead of hardcoded message texts + return (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) || + (ex is SqlException && ex.Message.Contains("A network-related or instance-specific error occurred")); +} +``` + +> Ez **szó szerint ugyanaz a kód** mint az `SnDataContext.ShouldRetryOnError()` — duplikáció. + +### 1.3 `MsSqlDataContext` rossz helyen van + +**Fájl**: `src/Common/Storage/Data/MsSqlClient/MsSqlDataContext.cs` (225 sor) +**Projekt**: `SenseNet.Common` ← **ITT A BAJ!** + +Az MSSQL-specifikus `MsSqlDataContext` a **Common** projektben él, nem a `ContentRepository.MsSql` projektben. Ez az oka, hogy a `SenseNet.Common.csproj`-nak szüksége van a `Microsoft.Data.SqlClient` NuGet csomagra. + +--- + +## 2. Mi a helyes architektúra? 
+ +### Jelenlegi állapot (hibás): + +``` +SenseNet.Common (NuGet: Microsoft.Data.SqlClient ⚠️) + ├── SnDataContext (abstract, de SqlException-t használ ⚠️) + └── MsSqlClient/ + └── MsSqlDataContext (konkrét MSSQL — rossz helyen! ⚠️) + +SenseNet.Storage (tranzitív: Microsoft.Data.SqlClient ⚠️) + └── RelationalDataProviderBase (abstract, de SqlException-t használ ⚠️) + +SenseNet.ContentRepository.MsSql + └── MsSqlDataProvider (konkrét MSSQL — rendben ✅) +``` + +### Cél állapot (tiszta): + +``` +SenseNet.Common (NEM függ Microsoft.Data.SqlClient-től ✅) + └── SnDataContext (abstract, NINCS SqlException referencia ✅) + +SenseNet.Storage (NEM függ Microsoft.Data.SqlClient-től ✅) + └── RelationalDataProviderBase (abstract, NINCS SqlException referencia ✅) + +SenseNet.ContentRepository.MsSql (NuGet: Microsoft.Data.SqlClient ✅) + ├── MsSqlDataContext (ide áthelyezve ✅) + └── MsSqlDataProvider (konkrét MSSQL — rendben ✅) + +SenseNet.ContentRepository.PostgreSql (NuGet: Npgsql ✅) ← LEHETŐVÉ VÁLIK + ├── PgSqlDataContext + └── PgSqlDataProvider +``` + +--- + +## 3. Javítási terv — `SnDataContext` + +### 3.1 A `ShouldRetryOnError()` átalakítása + +#### Jelenlegi kód (189-194. sor): + +```csharp +internal static bool ShouldRetryOnError(Exception ex) +{ + //TODO: generalize the expression by relying on error codes instead of hardcoded message texts + return (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) || + (ex is SqlException && ex.Message.Contains("A network-related or instance-specific error occurred")); +} +``` + +#### Javasolt megoldás: + +A metódust **kettéválasztjuk**: az adatbázis-független rész marad az absztrakt osztályban, az adatbázis-specifikus rész `protected virtual` lesz: + +```csharp +/// +/// Determines whether the given exception is a transient error that should be retried. +/// The base implementation handles connection pool exhaustion (ADO.NET provider-independent). 
+/// Override in derived classes to add database-specific transient error detection.
+///
+protected virtual bool IsTransientError(Exception ex)
+{
+    // Connection pool exhaustion — ez ADO.NET szinten adatbázis-független
+    return ex is InvalidOperationException &&
+           ex.Message.Contains("connection from the pool");
+}
+```
+
+A `RetryAsync` metódus a belső `ShouldRetryOnError` helyett az `IsTransientError`-t hívja:
+
+```csharp
+public Task<T> RetryAsync<T>(Func<Task<T>> action, CancellationToken cancel)
+{
+    return _retrier.RetryAsync(action,
+        shouldRetryOnError: (ex, _) => IsTransientError(ex),   // ← módosítva
+        onAfterLastIteration: (_, ex, i) =>
+        {
+            SnTrace.Database.WriteError(
+                $"Data layer error: {ex.Message}. Retry cycle ended after {i} iterations.");
+            throw new InvalidOperationException("Data layer timeout occurred.", ex);
+        },
+        cancel: cancel);
+}
+```
+
+#### A `MsSqlDataContext`-ben az override:
+
+```csharp
+public class MsSqlDataContext : SnDataContext
+{
+    protected override bool IsTransientError(Exception ex)
+    {
+        // Adatbázis-független transient hibák (connection pool)
+        if (base.IsTransientError(ex))
+            return true;
+
+        // MSSQL-specifikus hálózati hibák
+        return ex is SqlException sqlEx &&
+               sqlEx.Message.Contains("A network-related or instance-specific error occurred");
+    }
+}
+```
+
+#### Egy jövőbeli `PgSqlDataContext`-ben:
+
+```csharp
+public class PgSqlDataContext : SnDataContext
+{
+    protected override bool IsTransientError(Exception ex)
+    {
+        if (base.IsTransientError(ex))
+            return true;
+
+        // PostgreSQL-specifikus transient hibák
+        return ex is NpgsqlException npgEx &&
+               (npgEx.IsTransient ||
+                (npgEx is PostgresException pgEx && pgEx.SqlState == "57P01")); // admin_shutdown
+    }
+}
+```
+
+### 3.2 A `using Microsoft.Data.SqlClient;` eltávolítása
+
+Miután a `ShouldRetryOnError()`-ből eltávolítjuk a `SqlException` referenciát, a `using Microsoft.Data.SqlClient;` sor törölhető a 4. sorból.
+ +### 3.3 Teljes diff az `SnDataContext.cs`-ben + +```diff + using System; + using System.Data; + using System.Data.Common; +-using Microsoft.Data.SqlClient; + using System.Threading; + using System.Threading.Tasks; + using System.Transactions; +@@ -186,11 +185,16 @@ + } + +- internal static bool ShouldRetryOnError(Exception ex) ++ /// ++ /// Determines whether the given exception is a transient error that should be retried. ++ /// Override in derived classes to add database-specific transient error detection. ++ /// ++ protected virtual bool IsTransientError(Exception ex) + { +- //TODO: generalize the expression by relying on error codes instead of hardcoded message texts +- return (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) || +- (ex is SqlException && ex.Message.Contains("A network-related or instance-specific error occurred")); ++ // Connection pool exhaustion — ADO.NET szinten adatbázis-független ++ return ex is InvalidOperationException && ++ ex.Message.Contains("connection from the pool"); + } + +@@ -209,7 +213,7 @@ + public Task RetryAsync(Func> action, CancellationToken cancel) + { + return _retrier.RetryAsync(action, +- shouldRetryOnError: (ex, _) => ShouldRetryOnError(ex), ++ shouldRetryOnError: (ex, _) => IsTransientError(ex), + onAfterLastIteration: (_, ex, i) => + { + SnTrace.Database.WriteError( +``` + +### 3.4 Hatáselemzés + +| Elem | Hatás | +|------|-------| +| `MsSqlDataContext` | Override-olnia kell az `IsTransientError()`-t az MSSQL-specifikus ellenőrzéssel | +| `InMemoryDataContext` | Nem érintett — az alapértelmezett viselkedés megfelelő | +| Teszt `DataContext`-ek | Nem érintett — a connection pool check továbbra is működik | +| Visszafelé kompatibilitás | A viselkedés **nem változik** az MSSQL provider-nél, ha az override helyes | + +--- + +## 4. Javítási terv — `RelationalDataProviderBase` + +### 4.1 Az `IsDatabaseReadyAsync()` átalakítása + +#### Jelenlegi kód (2546-2578. 
sor): + +```csharp +public override async Task IsDatabaseReadyAsync(CancellationToken cancellationToken) +{ + const string schemaCheckSql = @" +SELECT CASE WHEN EXISTS ( + SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = N'Nodes' +) +THEN CAST(1 AS BIT) +ELSE CAST(0 AS BIT) END"; + + using var ctx = CreateDataContext(cancellationToken); + bool result; + try + { + var dbResult = await ctx.ExecuteScalarAsync(schemaCheckSql).ConfigureAwait(false); + result = Convert.ToBoolean(dbResult); + } + catch (SqlException ex) + { + if (ex.Number is 4060 or 233) + result = false; + else + throw; + } + return result; +} +``` + +#### Problémák: + +1. `catch (SqlException ex)` — csak MSSQL exception-öket kap el +2. `ex.Number is 4060 or 233` — MSSQL-specifikus hibakódok: + - `4060` = "Cannot open database requested by the login" + - `233` = "A connection was successfully established with the server, but then an error occurred" +3. Az SQL script `CAST(1 AS BIT)` — MSSQL-specifikus (PostgreSQL-ben `CAST(1 AS BOOLEAN)` lenne) + +#### Javasolt megoldás — A: Template Method minta + +```csharp +public override async Task IsDatabaseReadyAsync(CancellationToken cancellationToken) +{ + using var op = SnTrace.Database.StartOperation("RelationalDataProviderBase: IsDatabaseReady()"); + + using var ctx = CreateDataContext(cancellationToken); + bool result; + try + { + var dbResult = await ctx.ExecuteScalarAsync(SchemaCheckScript).ConfigureAwait(false); + result = Convert.ToBoolean(dbResult); + } + catch (Exception ex) when (IsDatabaseNotAvailableException(ex)) + { + // The database does not exist yet or is not accessible. + result = false; + } + op.Successful = true; + + return result; +} + +/// +/// Gets the SQL script that checks whether the database schema is ready. +/// Should return a boolean-compatible scalar (1/true or 0/false). 
+/// +protected abstract string SchemaCheckScript { get; } + +/// +/// Determines whether the given exception indicates that the database +/// is not available (e.g. does not exist, login failed, not accessible). +/// +protected abstract bool IsDatabaseNotAvailableException(Exception ex); +``` + +#### `MsSqlDataProvider` implementáció: + +```csharp +protected override string SchemaCheckScript => @" +SELECT CASE WHEN EXISTS ( + SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = N'Nodes' +) +THEN CAST(1 AS BIT) +ELSE CAST(0 AS BIT) END"; + +protected override bool IsDatabaseNotAvailableException(Exception ex) +{ + // 4060: Cannot open database requested by the login. The login failed. + // 233: Connection established but then error occurred during login. + return ex is SqlException sqlEx && sqlEx.Number is 4060 or 233; +} +``` + +#### Egy jövőbeli `PgSqlDataProvider` implementáció: + +```csharp +protected override string SchemaCheckScript => @" +SELECT CASE WHEN EXISTS ( + SELECT * FROM information_schema.tables WHERE table_name = 'Nodes' +) +THEN true +ELSE false END"; + +protected override bool IsDatabaseNotAvailableException(Exception ex) +{ + // 3D000: invalid_catalog_name (database does not exist) + // 28P01: invalid_password + // 28000: invalid_authorization_specification + return ex is PostgresException pgEx && + pgEx.SqlState is "3D000" or "28P01" or "28000"; +} +``` + +### 4.2 A `ShouldRetryOnError()` átalakítása (duplikáció eltávolítása) + +#### Jelenlegi kód (2751-2755. sor): + +```csharp +protected virtual bool ShouldRetryOnError(Exception ex) +{ + //TODO: generalize the expression by relying on error codes instead of hardcoded message texts + return (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) || + (ex is SqlException && ex.Message.Contains("A network-related or instance-specific error occurred")); +} +``` + +Ez **duplikáció** — ugyanaz a kód mint az `SnDataContext.ShouldRetryOnError()`. 
+ +#### Javasolt megoldás: + +A `RelationalDataProviderBase` `ShouldRetryOnError()` metódusát **delegáljuk az `SnDataContext`-re**: + +```csharp +protected virtual bool ShouldRetryOnError(Exception ex) +{ + // Delegate to the data context's transient error detection. + // This avoids duplicating the database-specific retry logic. + // The concrete data context (MsSql, PostgreSQL, etc.) provides + // the database-specific checks via IsTransientError() override. + using var ctx = CreateDataContext(CancellationToken.None); + return ctx.IsTransientError(ex); +} +``` + +**Alternatíva** (ha a context létrehozás overhead nem kívánatos): + +Az `SnDataContext.IsTransientError()` logikáját kiemelhetjük egy statikus utility-be, vagy a `RelationalDataProviderBase`-ben is bevezethetünk egy abstract `IsTransientError()`-t: + +```csharp +/// +/// Determines whether the exception is a transient database error that should be retried. +/// Must be overridden in database-specific implementations. +/// +protected abstract bool IsTransientError(Exception ex); + +protected bool ShouldRetryOnError(Exception ex) +{ + // Connection pool exhaustion — ADO.NET szinten adatbázis-független + if (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) + return true; + + // Database-specific transient errors + return IsTransientError(ex); +} +``` + +#### `MsSqlDataProvider` implementáció: + +```csharp +protected override bool IsTransientError(Exception ex) +{ + return ex is SqlException sqlEx && + sqlEx.Message.Contains("A network-related or instance-specific error occurred"); +} +``` + +### 4.3 A `using Microsoft.Data.SqlClient;` eltávolítása + +Az import az 5. sorban törölhető, miután mindkét `SqlException` referencia eltávolításra került. 
+ +### 4.4 Teljes diff a `RelationalDataProviderBase.cs`-ben + +```diff + using System; + using System.Collections.Generic; + using System.Data; + using System.Data.Common; +-using Microsoft.Data.SqlClient; + using System.Globalization; + using System.Linq; + +@@ -2543,6 +2542,9 @@ + protected abstract string LoadEntityTreeScript { get; } + ++ protected abstract string SchemaCheckScript { get; } ++ protected abstract bool IsDatabaseNotAvailableException(Exception ex); ++ + public override async Task IsDatabaseReadyAsync(CancellationToken cancellationToken) + { +- const string schemaCheckSql = @" +-SELECT CASE WHEN EXISTS ( +- SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = N'Nodes' +-) +-THEN CAST(1 AS BIT) +-ELSE CAST(0 AS BIT) END"; +- + using var op = SnTrace.Database.StartOperation("RelationalDataProviderBase: IsDatabaseReady()"); + + using var ctx = CreateDataContext(cancellationToken); + bool result; + try + { +- var dbResult = await ctx.ExecuteScalarAsync(schemaCheckSql).ConfigureAwait(false); ++ var dbResult = await ctx.ExecuteScalarAsync(SchemaCheckScript).ConfigureAwait(false); + result = Convert.ToBoolean(dbResult); + } +- catch (SqlException ex) ++ catch (Exception ex) when (IsDatabaseNotAvailableException(ex)) + { +- if (ex.Number is 4060 or 233) +- result = false; +- else +- throw; ++ result = false; + } + op.Successful = true; + +@@ -2748,10 +2740,12 @@ + +- protected virtual bool ShouldRetryOnError(Exception ex) ++ protected abstract bool IsTransientError(Exception ex); ++ ++ protected bool ShouldRetryOnError(Exception ex) + { +- //TODO: generalize the expression by relying on error codes instead of hardcoded message texts +- return (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) || +- (ex is SqlException && ex.Message.Contains("A network-related or instance-specific error occurred")); ++ if (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) ++ return true; ++ return 
IsTransientError(ex); + } +``` + +--- + +## 5. Javítási terv — `MsSqlDataContext` áthelyezése + +### 5.1 Jelenlegi helyzet + +A `MsSqlDataContext.cs` (225 sor) jelenleg itt van: +``` +src/Common/Storage/Data/MsSqlClient/MsSqlDataContext.cs +``` + +Ez a `SenseNet.Common` projektben van, ami a `Microsoft.Data.SqlClient` NuGet csomagot is ide kényszeríti. + +### 5.2 Célhelyzet + +Áthelyezés ide: +``` +src/ContentRepository.MsSql/MsSqlDataContext.cs +``` + +### 5.3 Lépések + +1. **Fájl áthelyezése**: `MsSqlDataContext.cs` mozgatása `Common/Storage/Data/MsSqlClient/` → `ContentRepository.MsSql/` +2. **Namespace megtartása**: A `namespace SenseNet.ContentRepository.Storage.Data.MsSqlClient` marad, nem kell változtatni +3. **`SenseNet.Common.csproj` módosítása**: `Microsoft.Data.SqlClient` PackageReference eltávolítása +4. **`SenseNet.ContentRepository.MsSql.csproj` módosítása**: `Microsoft.Data.SqlClient` PackageReference hozzáadása (ha még nincs) +5. **Referenciák ellenőrzése**: Minden fájl ami `MsSqlDataContext`-et használ, a `ContentRepository.MsSql` projektre kell hivatkozzon + +### 5.4 Hatáselemzés + +| Elem | Hatás | +|------|-------| +| `SenseNet.Common.csproj` | `Microsoft.Data.SqlClient` NuGet csomag eltávolítható → ~5MB-tal kisebb dependency | +| `SenseNet.ContentRepository.MsSql.csproj` | Már hivatkozik rá tranzitíven, explicit PackageReference hozzáadandó | +| Az `MsSqlDataContext`-et használó összes fájl | Mind a `ContentRepository.MsSql` projektben vannak → nincs hatás | +| A `Common`-ra hivatkozó projektek | **Nem függnek többé** MSSQL drivertől → PostgreSQL provider lehetővé válik | + +### 5.5 Kockázat + +⚠️ **Ha bármely más projekt** (nem `ContentRepository.MsSql`) közvetlenül hivatkozik az `MsSqlDataContext`-re, az **fordítási hibát** okoz. Ellenőrzendő: + +- `SenseNet.Storage` — NEM használja közvetlenül ✅ +- `SenseNet.ContentRepository` — NEM használja közvetlenül ✅ +- `SenseNet.BlobStorage` — Ellenőrizni kell! 
A `BuiltInBlobProvider` saját `SqlConnection`-t hoz létre, de nem `MsSqlDataContext`-et
+- Tesztek — Az `MsSqlTests` projekt hivatkozik rá, de az már hivatkozik `ContentRepository.MsSql`-re is
+
+---
+
+## 6. A `SenseNet.Common.csproj` módosítás
+
+### Jelenlegi:
+
+```xml
+<PackageReference Include="Microsoft.Data.SqlClient" Version="..." />
+```
+
+### Javított:
+
+Ez a sor **törlendő** a `SenseNet.Common.csproj`-ból, és **hozzáadandó** a `SenseNet.ContentRepository.MsSql.csproj`-hoz.
+
+---
+
+## 7. Összefoglalás — Végrehajtási sorrend
+
+### Lépés 1: `SnDataContext.ShouldRetryOnError()` → `IsTransientError()` (1 nap)
+
+| # | Tennivaló | Fájl |
+|---|-----------|------|
+| 1a | `ShouldRetryOnError()` átnevezése → `IsTransientError()`, `internal static` → `protected virtual` | `SnDataContext.cs` |
+| 1b | Az MSSQL-specifikus ellenőrzés kiemelése a base-ből | `SnDataContext.cs` |
+| 1c | `RetryAsync()` módosítása: `ShouldRetryOnError` → `IsTransientError` hívás | `SnDataContext.cs` |
+| 1d | `using Microsoft.Data.SqlClient;` törlése | `SnDataContext.cs` |
+| 1e | `MsSqlDataContext`-ben `IsTransientError()` override bevezetése | `MsSqlDataContext.cs` |
+| 1f | Unit tesztek futtatása | — |
+
+### Lépés 2: `RelationalDataProviderBase` tisztítása (1 nap)
+
+| # | Tennivaló | Fájl |
+|---|-----------|------|
+| 2a | `IsDatabaseReadyAsync()` — `SchemaCheckScript` abstract property bevezetése | `RelationalDataProviderBase.cs` |
+| 2b | `IsDatabaseReadyAsync()` — `IsDatabaseNotAvailableException()` abstract method bevezetése | `RelationalDataProviderBase.cs` |
+| 2c | `IsDatabaseReadyAsync()` — `catch (SqlException)` → `catch (Exception) when (...)` | `RelationalDataProviderBase.cs` |
+| 2d | `ShouldRetryOnError()` duplikáció eltávolítása + `IsTransientError()` abstract bevezetése | `RelationalDataProviderBase.cs` |
+| 2e | `using Microsoft.Data.SqlClient;` törlése | `RelationalDataProviderBase.cs` |
+| 2f | `MsSqlDataProvider`-ben a 3 új abstract member implementálása | `MsSqlDataProvider.cs` |
+| 2g | Integrációs tesztek futtatása | — | 
+ +### Lépés 3: `MsSqlDataContext` áthelyezése + NuGet tisztítás (1 nap) + +| # | Tennivaló | Fájl | +|---|-----------|------| +| 3a | `MsSqlDataContext.cs` áthelyezése `Common` → `ContentRepository.MsSql` | fájlrendszer | +| 3b | `Microsoft.Data.SqlClient` eltávolítása `SenseNet.Common.csproj`-ból | `SenseNet.Common.csproj` | +| 3c | `Microsoft.Data.SqlClient` hozzáadása `SenseNet.ContentRepository.MsSql.csproj`-hoz | csproj | +| 3d | Fordítás + referenciahibák javítása | — | +| 3e | Teljes teszt futtatás | — | + +### Lépés 4: Validáció (0.5-1 nap) + +| # | Tennivaló | +|---|-----------| +| 4a | `dotnet build` az egész solution-re | +| 4b | Összes unit teszt futtatás | +| 4c | MSSQL integrációs tesztek futtatása | +| 4d | Ellenőrzés: `SenseNet.Common.dll` assembly-ben nincs `Microsoft.Data.SqlClient` referencia | +| 4e | Ellenőrzés: `SenseNet.Storage.dll` assembly-ben nincs `Microsoft.Data.SqlClient` referencia | + +--- + +## 8. Új abstract memberek összefoglalása + +A javítás után a `MsSqlDataProvider`-ben (és bármely jövőbeli provider-ben) implementálandó új abstract memberek: + +| Osztály | Új member | Típus | Cél | +|---------|-----------|-------|-----| +| `SnDataContext` | `IsTransientError(Exception)` | `protected virtual bool` | Transient hiba detektálás (retry) | +| `RelationalDataProviderBase` | `SchemaCheckScript` | `protected abstract string` | SQL az adatbázis készenlét ellenőrzésére | +| `RelationalDataProviderBase` | `IsDatabaseNotAvailableException(Exception)` | `protected abstract bool` | Adatbázis nem elérhető hiba detektálás | +| `RelationalDataProviderBase` | `IsTransientError(Exception)` | `protected abstract bool` | Transient hiba detektálás (retry) — a provider szinten | + +> **Megjegyzés**: Az `IsDeadlockException(Exception)` már **helyesen abstract** a `DataProvider` base class-ban, és az `MsSqlDataProvider` helyesen override-olja. Ez nem szorul javításra. + +--- + +## 9. 
Kockázatok és mitigáció + +| Kockázat | Súlyosság | Mitigáció | +|----------|-----------|-----------| +| Az `IsTransientError()` nem-`static` lett → más a hívási minta | 🟡 Közepes | A `RetryAsync()` metódus ugyanúgy hívja, nincs API törés | +| `MsSqlDataContext` áthelyezés referenciatörés | 🟡 Közepes | Fordítás-idejű hiba, azonnal látható | +| `Microsoft.Data.SqlClient` eltávolítása más projektet is érint | 🟡 Közepes | Minden hivatkozó projektet ellenőrizni | +| A `ShouldRetryOnError` visszafelé kompatibilitás | 🟢 Alacsony | A régi `internal static` metódus nem volt publikus API | +| Teszt projektek `MsSqlDataContext` hivatkozása | 🟡 Közepes | Teszt projektekbe is hozzáadni a `ContentRepository.MsSql` referenciát | + +--- + +## 10. Becsült idő + +| Fázis | Idő | +|-------|-----| +| `SnDataContext` javítás + `MsSqlDataContext` override | **1 nap** | +| `RelationalDataProviderBase` javítás + `MsSqlDataProvider` implementáció | **1 nap** | +| `MsSqlDataContext` áthelyezés + NuGet tisztítás | **1 nap** | +| Validáció, teszt futtatás, edge-case javítás | **0.5-1 nap** | +| **Összesen** | **3.5-4 nap** | + +Ez az eredeti becslés (2×2-3 nap = 4-6 nap) **alsó tartományába** esik, mert a javítások jól definiáltak és a kódbázis áttekinthető. diff --git a/docs/notes/connection-pool-kimeriules-elemzes.md b/docs/notes/connection-pool-kimeriules-elemzes.md new file mode 100644 index 000000000..6b5623d0b --- /dev/null +++ b/docs/notes/connection-pool-kimeriules-elemzes.md @@ -0,0 +1,304 @@ +# 🔍 Connection Pool kimerülés elemzése — MSSQL provider + +## Összefoglaló + +Ez a dokumentum a sensenet MSSQL providerében található **potenciális connection pool szivárgási pontokat** elemzi. A probléma jellemzően **nagy terhelés alatt** (pl. sérülékenységi vizsgálatok, stressz tesztek) jelentkezik, amikor a `SqlConnection` pool eléri a maximális limitet és az alkalmazás nem tud új kapcsolatokat nyitni. 
+ +> **Fontos**: Ez a dokumentum **nem az absztrakciós szivárgásról** (abstraction leak) szól, amit a [megvalósíthatósági elemzés](megvalosithatosagi-elemzes.md) tárgyal. Az absztrakciós szivárgás azt jelenti, hogy az adatbázis-független rétegben MSSQL-specifikus kód szerepel (pl. `SqlException` referencia az `SnDataContext`-ben). A jelen dokumentum a **valódi connection pool kimerülés** okait vizsgálja. + +--- + +## 1. A connection pool működése + +Az ADO.NET `SqlConnection` pool alapértelmezetten **100 kapcsolatot** engedélyez connection stringenként. Amikor egy `SqlConnection.Open()` hívás történik, a pool egy szabad kapcsolatot ad vissza. Ha nincs szabad, és a limit elérve, a hívás **várakozik** (alapértelmezetten 15 mp), majd `InvalidOperationException`-t dob: `"Timeout expired. The timeout period elapsed prior to obtaining a connection from the pool."`. + +--- + +## 2. Az `SnDataContext` connection-kezelési modellje + +Az `SnDataContext` (fájl: `src/Common/Storage/Data/SnDataContext.cs`) `IDisposable` és manuálisan kezeli a kapcsolatot: + +```csharp +protected DbConnection OpenConnection() +{ + if (_connection?.State == ConnectionState.Closed || _connection?.State == ConnectionState.Broken) + { + _connection.Dispose(); + _connection = null; + } + if (_connection == null) + { + _connection = CreateConnection(); + _connection.Open(); + } + return _connection; +} +``` + +### A modell lényege + +- Egy `SnDataContext` példány **egy connection-t tart életben** a teljes élettartama alatt. +- A connection csak a `Dispose()` hívásakor szabadul fel. +- Ha bármelyik hívó **elfelejtkezik a `Dispose()`-ról** (vagyis nem `using` blokkban használja), a connection **nem kerül vissza a pool-ba** amíg a GC nem gyűjti be. + +--- + +## 3. 
Gyanús pontok a kódbázisban
+
+### 3.1 🔴 `ExecuteReaderAsync` — connection tartása callback futása alatt
+
+**Fájl**: `src/Common/Storage/Data/MsSqlClient/MsSqlDataContext.cs`
+
+```csharp
+public async Task<T> ExecuteReaderAsync<T>(string script, Action<DbCommand> setParams,
+    Func<DbDataReader, CancellationToken, Task<T>> callbackAsync)
+{
+    // ...
+    cmd.Connection = (SqlConnection) OpenConnection(); // ← connection megnyitva
+    // ...
+    using (var reader = await cmd.ExecuteReaderAsync(cancellationToken))
+    {
+        var result = await callbackAsync(reader, cancellationToken); // ← callback futása alatt foglalt!
+        return result;
+    }
+}
+```
+
+**Probléma**: Ha a `callbackAsync` **lassú** (pl. nagy eredményhalmaz feldolgozása, sok sor iterálás), vagy ha **sok párhuzamos kérés** futtat ilyet egyszerre, a connection pool kimerülhet.
+
+**Hatás**: A sérülékenységi vizsgálatok tipikusan **sok párhuzamos, gyors egymás utáni kérést** generálnak, ami pontosan ezt a helyzetet idézi elő — az összes connection foglalt marad a callback-ek futása alatt.
+
+### 3.2 🔴 Retry logika exception közben
+
+**Fájl**: `src/Common/Storage/Data/SnDataContext.cs`, `RetryAsync()` metódus
+
+```csharp
+protected virtual bool ShouldRetryOnError(Exception ex)
+{
+    return (ex is SqlException && ex.Message.Contains("A network-related or instance-specific error occurred"));
+}
+```
+
+**Probléma**: Ha a retry megnyit egy újabb kapcsolatot, de az exception kezelés nem dispose-olja az előzőt, akkor **két connection is foglalt** lehet egyidejűleg ugyanarra a műveletre. Retry hurokban ez multiplikálódhat.
+
+### 3.3 🟡 `MsSqlDataContext` tranzakció timeout
+
+**Fájl**: `src/Common/Storage/Data/SnDataContext.cs`, `BeginTransaction()`
+
+A `TransactionWrapper` egy tranzakciót tart nyitva, ami alatt a connection **végig foglalt**. Ha a tranzakció timeout-ol vagy deadlock-ba kerül, a connection **a timeout lejártáig blokkolva marad**, mielőtt visszakerülne a pool-ba. 
+ +### 3.4 🟡 Sync-over-async (`GetAwaiter().GetResult()`) + +**Fájlok**: +- `src/ContentRepository.MsSql/Packaging/Steps/InstallInitialData.cs` +- `src/ContentRepository/Packaging/Steps/Internal/CheckDatabaseConnection.cs` + +A `GetAwaiter().GetResult()` minta **deadlock-ot okozhat** ASP.NET kontextusban, ami azt eredményezi, hogy a connection **soha nem szabadul fel**, mert a continuation nem fut le. + +### 3.5 🟡 Bulk insert exception közben + +**Fájl**: `src/ContentRepository.MsSql/MsSqlDataInstaller.cs` + +Ha a `SqlBulkCopy.WriteToServerAsync` exception-t dob, a connection nem biztos, hogy megfelelően felszabadul, különösen ha az explicit tranzakció is rollback-re vár. + +### 3.6 🟢 Kézi `SqlConnection` kezelés (biztonságos) + +**Fájl**: `src/ContentRepository.MsSql/MsSqlDatabaseInstaller.cs` + +```csharp +private async Task ExecuteSqlCommandAsync(string sql, string connectionString) +{ + using (var cn = new SqlConnection(connectionString)) + using (var cmd = new SqlCommand(sql, cn)) + { + cmd.CommandType = CommandType.Text; + cn.Open(); + await cmd.ExecuteNonQueryAsync().ConfigureAwait(false); + } +} +``` + +Ez **rendben van** — a `using` blokk biztosítja, hogy exception esetén is felszabadul a connection. + +--- + +## 4. A legvalószínűbb root cause nagy terhelésnél + +### Forgatókönyv + +1. Sérülékenységi vizsgáló eszköz **N párhuzamos kérést** küld (N > 50-100) +2. Minden kérés létrehoz egy `SnDataContext`-et +3. Minden context megnyit egy `SqlConnection`-t +4. Az `ExecuteReaderAsync` callback-je **tartja a connection-t** amíg a válasz feldolgozása tart +5. Ha N eléri a pool limitet (alapértelmezetten 100), az új kérések **várakoznak** +6. A timeout (15 mp) lejárta után `InvalidOperationException` dobódik + +### Miért nem jelentkezik normál terhelésnél? + +Normál terhelésnél a kérések **szekvenciálisan vagy mérsékelt párhuzamossággal** érkeznek, a callback-ek gyorsan lefutnak, és a connection-ök visszakerülnek a pool-ba mielőtt az kimerülne. + +--- + +## 5. 
Diagnosztikai eszközök + +### 5.1 SQL Server oldali monitoring + +```sql +-- Aktuális connection pool állapot: +SELECT + DB_NAME(dbid) AS DatabaseName, + COUNT(dbid) AS NumberOfConnections, + loginame AS LoginName, + status +FROM sys.sysprocesses +WHERE dbid > 0 +GROUP BY dbid, loginame, status +ORDER BY NumberOfConnections DESC; + +-- Várakozó / blokkolt kapcsolatok: +SELECT + r.session_id, + r.blocking_session_id, + r.wait_type, + r.wait_time, + r.command, + t.text AS query_text +FROM sys.dm_exec_requests r +CROSS APPLY sys.dm_exec_sql_text(r.sql_handle) t +WHERE r.blocking_session_id > 0; + +-- Connection-ök állapota az alkalmazás felhasználóhoz: +SELECT + c.session_id, + c.connect_time, + c.last_read, + c.last_write, + s.status, + s.host_name, + s.program_name, + s.login_name +FROM sys.dm_exec_connections c +JOIN sys.dm_exec_sessions s ON c.session_id = s.session_id +WHERE s.login_name = 'sensenet_app_user' -- az alkalmazás SQL user-e +ORDER BY c.connect_time; +``` + +### 5.2 .NET oldali monitoring + +```bash +# dotnet-counters a connection pool metrikák monitorozásához: +dotnet-counters monitor Microsoft.Data.SqlClient.EventSource \ + --counters active-hard-connections,active-soft-connections,\ + number-of-active-connection-pool-groups,\ + number-of-active-connection-pools,\ + number-of-free-connections,\ + number-of-stasis-connections +``` + +### 5.3 Connection string diagnosztikai beállítások + +``` +Server=...;Database=...; +Min Pool Size=0; +Max Pool Size=100; +Connection Lifetime=300; +Connection Timeout=30; +Application Name=SenseNetDiagnostics; +``` + +Az `Application Name` beállítás segít a SQL Server oldalon azonosítani az alkalmazás kapcsolatait a `sys.dm_exec_sessions.program_name` oszlopban. + +--- + +## 6. Javasolt javítások + +### 6.1 Azonnali (quick wins) + +| # | Javítás | Hatás | Erőfeszítés | +|---|---------|-------|-------------| +| 1 | **Connection string-ben `Max Pool Size` növelése** (pl. 
200-ra) | Tüneti kezelés, de azonnali segítség | Konfig változás | +| 2 | **`Connection Lifetime=300` hozzáadása** a connection stringhez | Automatikus connection recycle 5 percenként | Konfig változás | +| 3 | **`dotnet-counters` monitoring** bekapcsolása éles környezetben | Láthatóság a pool állapotáról | Ops feladat | + +### 6.2 Középtávú (kódmódosítás) + +| # | Javítás | Érintett fájl | Erőfeszítés | +|---|---------|---------------|-------------| +| 4 | **`SnDataContext` Dispose ellenőrzés** — figyelmeztetés/log ha a context Dispose nélkül kerül GC-re | `src/Common/Storage/Data/SnDataContext.cs` | 1 nap | +| 5 | **`ExecuteReaderAsync` timeout** hozzáadása a callback-hez | `src/Common/Storage/Data/MsSqlClient/MsSqlDataContext.cs` | 1 nap | +| 6 | **Retry logika connection kezelés** auditálása — biztosítani, hogy retry előtt az előző connection dispose-olódjon | `src/Common/Storage/Data/SnDataContext.cs` | 2 nap | +| 7 | **`GetAwaiter().GetResult()` eliminálása** a packaging step-ekből | `src/ContentRepository.MsSql/Packaging/Steps/` | 2-3 nap | + +### 6.3 Hosszú távú (architekturális) + +| # | Javítás | Hatás | Erőfeszítés | +|---|---------|-------|-------------| +| 8 | **Connection-per-command modell** bevezetése — a connection ne a `SnDataContext` élettartamához legyen kötve, hanem minden SQL parancs saját connection-t nyisson és azonnal visszaadja | A pool terhelése drasztikusan csökken | 2-3 hét | +| 9 | **Connection pool monitoring middleware** — ASP.NET middleware ami logol ha a pool kihasználtsága >80% | Korai figyelmeztetés | 1-2 nap | +| 10 | **Rate limiting** middleware nagy terhelésű endpoint-okra | Védekezés a vizsgálatok jellegű terhelés ellen | 2-3 nap | + +### 6.4 GC-alapú Dispose ellenőrzés implementáció (4. 
pont részletezése) + +A következő minta figyelmeztet, ha egy `SnDataContext` Dispose nélkül kerül a GC-be: + +```csharp +public abstract class SnDataContext : IDisposable +{ + private bool _disposed; + private readonly string _creationStackTrace; + + protected SnDataContext() + { +#if DEBUG + _creationStackTrace = Environment.StackTrace; +#endif + } + + ~SnDataContext() + { + if (!_disposed) + { + // LOG: Connection leak detected! + var message = "SnDataContext was not disposed properly. " + + "This may cause connection pool exhaustion."; +#if DEBUG + message += $" Created at: {_creationStackTrace}"; +#endif + SnTrace.Database.WriteError(message); + + Dispose(false); + } + } + + public void Dispose() + { + Dispose(true); + GC.SuppressFinalize(this); + } + + protected virtual void Dispose(bool disposing) + { + if (_disposed) return; + if (disposing) + { + _connection?.Dispose(); + _transaction?.Dispose(); + } + _disposed = true; + } +} +``` + +--- + +## 7. Összefoglalás + +| Prioritás | Probléma | Valószínűség | +|-----------|----------|-------------| +| 🔴 **Kritikus** | `ExecuteReaderAsync` callback tartja a connection-t → sok párhuzamos kérésnél pool kimerülés | **Nagyon magas** | +| 🔴 **Kritikus** | Retry logika többszörös connection foglalást okozhat | **Közepes** | +| 🟡 **Fontos** | Tranzakció timeout → connection blokkolva marad | **Közepes** | +| 🟡 **Fontos** | Sync-over-async deadlock → connection soha nem szabadul | **Alacsony-közepes** (csak packaging step-ekben) | +| 🟡 **Fontos** | Bulk insert exception → connection nem szabadul | **Alacsony** (csak telepítéskor) | + +A legvalószínűbb root cause: **sok párhuzamos kérés egyszerre tartja foglalva a connection-öket a callback-ek futása alatt**, és a pool (alapértelmezetten 100) kimerül. Az azonnali megoldás a pool limit növelése, de hosszú távon a connection kezelési modell felülvizsgálata szükséges. 
diff --git a/docs/notes/megvalosithatosagi-elemzes.md b/docs/notes/megvalosithatosagi-elemzes.md new file mode 100644 index 000000000..b775ea7a1 --- /dev/null +++ b/docs/notes/megvalosithatosagi-elemzes.md @@ -0,0 +1,719 @@ +# 🐘 Megvalósíthatósági elemzés: PostgreSQL provider létrehozása a sensenet MSSQL providerből + +## Tartalomjegyzék + +- [1. Architektúra áttekintése](#1-architektúra-áttekintése) +- [2. Érintett fájlok és kódmennyiség](#2-érintett-fájlok-és-kódmennyiség) +- [3. MSSQL → PostgreSQL SQL konverziók](#3-mssql--postgresql-sql-konverziók) +- [4. C# kód változások](#4-c-kód-változások) +- [5. Adatbázis-séma konverzió](#5-adatbázis-séma-konverzió) +- [6. Az absztrakt réteg szükséges javításai](#6-az-absztrakt-réteg-szükséges-javításai) +- [7. Munkaterv és becslés](#7-munkaterv-és-becslés) +- [8. Kockázatok és kihívások](#8-kockázatok-és-kihívások) +- [9. Javasolt projektstruktúra](#9-javasolt-projektstruktúra) +- [10. Összefoglalás](#10-összefoglalás) + +--- + +## 1. Architektúra áttekintése + +A sensenet adatelérési rétege **háromszintű öröklési hierarchiát** követ: + +``` +DataProvider (abstract, ~988 sor) ← adatbázis-független + ├── RelationalDataProviderBase (abstract, ~2759 sor) ← SQL-alapú, de ⚠️ MSSQL-szivárgás + │ └── MsSqlDataProvider (konkrét, ~630 + 1344 sor SQL) + └── InMemoryDataProvider (konkrét, tesztekhez) + +SnDataContext (abstract, ~226 sor) ← ⚠️ MSSQL-szivárgás + └── MsSqlDataContext (konkrét, ~225 sor) +``` + +> Az `InMemoryDataProvider` közvetlenül a `DataProvider`-ből származik, **átugorva** a `RelationalDataProviderBase`-t. Ez azt jelenti, hogy egy PostgreSQL provider számára a `RelationalDataProviderBase` a helyes szülőosztály. 
+ +### Rétegek részletezése + +| Réteg | Projekt | Szerep | +|-------|---------|--------| +| **Common** | `SenseNet.Common` | Legalsó szintű absztrakciók (`SnDataContext`) | +| **BlobStorage** | `SenseNet.BlobStorage` | Blob interfészek + MSSQL implementáció együtt élnek | +| **Storage** | `SenseNet.Storage` | `DataProvider` → `RelationalDataProviderBase` (abstract) | +| **ContentRepository.MsSql** | `SenseNet.ContentRepository.MsSql` | MSSQL-specifikus `MsSqlDataProvider` | +| **ContentRepository.InMemory** | `SenseNet.ContentRepository.InMemory` | In-memory teszt provider | + +### DI wiring + +A `MsSqlExtensions.cs` (72 sor) regisztrálja az összes szatellit providert a DI containerben. + +--- + +## 2. Érintett fájlok és kódmennyiség + +### 🔴 Teljesen újraírandó fájlok (a PostgreSQL provider-ben) + +| Fájl | Sorok | Feladat | +|------|------:|---------| +| `MsSqlDataProvider.cs` | 630 | → `PgSqlDataProvider.cs` | +| `MsSqlDataProviderScripts.cs` | 1 344 | → **~70 SQL script** átírása PostgreSQL dialektusra | +| `MsSqlDataContext.cs` | 225 | → `PgSqlDataContext.cs` (Npgsql-lel) | +| `MsSqlDataInstaller.cs` | 489 | → PostgreSQL adatfeltöltő | +| `MsSqlDatabaseInstaller.cs` | 357 | → CREATE DATABASE PostgreSQL módra | +| `MsSqlSchemaInstaller.cs` | 197 | → Bulk insert Npgsql-lel | +| `MsSqlSchemaWriter.cs` | 113 | → Schema writer | +| `MsSqlExclusiveLockDataProvider.cs` | 189 | → PgSql exclusive lock | +| `MsSqlSharedLockDataProvider.cs` | 224 | → PgSql shared lock | +| `MsSqlStatisticalDataProvider.cs` | 423 | → PgSql statisztika | +| `MsSqlPackagingDataProvider.cs` | 427 | → PgSql packaging | +| `MsSqlAccessTokenDataProvider.cs` | 284 | → PgSql token kezelés | +| `MsSqlClientStoreDataProvider.cs` | 352 | → PgSql client store | +| `MsSqlExtensions.cs` | 72 | → PgSql DI regisztráció | +| `SqlScriptReader.cs` | 53 | → Nem kell (`GO` MSSQL-specifikus) | +| `Create_SenseNet_Database.sql` | 1 114 | → PostgreSQL DDL | +| `MsSqlInstall_Security.sql` | 120 | → PostgreSQL 
security DDL | +| **BlobStorage MsSql fájlok** (5 fájl) | ~1 208 | → PgSql blob kezelés | +| **Components/** (3 fájl) | ~160 | → PgSql component-ek | +| **Összesen** | **~7 980** | Teljes újraírás | + +### ⚠️ Javítandó az absztrakt rétegben (MSSQL szivárgás) + +| Fájl | Probléma | Javítás | +|------|----------|---------| +| `SnDataContext.cs` (Common) | `SqlException` referencia a `ShouldRetryOnError()`-ban | Absztrakt `IsRetriableException()` metódus bevezetése | +| `RelationalDataProviderBase.cs` (Storage) | `SqlException` elkapás az `IsDatabaseReady()`-ben (~3 helyen) | Virtual/abstract error handling delegálás | +| `SenseNet.Common.csproj` | `Microsoft.Data.SqlClient` NuGet hivatkozás | Opcionálissá tenni vagy kiemelni | +| BlobStorage regisztráció | Hardcoded `MsSqlBlobMetaDataProvider` default | Provider-agnosztikus default | +| Connection string key | `"SnCrMsSql"` hardcoded | → `"SensenetRepository"` (generikus név) | + +### ContentRepository.MsSql projekt teljes fájllistája (~6 800 sor) + +| Fájl | Sorok | Cél | +|------|------:|-----| +| `MsSqlDataProvider.cs` | 630 | Fő provider — query override-ok, exception kezelés, timestamp-ek | +| `MsSqlDataProviderScripts.cs` | 1 344 | ~40 SQL script property (partial class) | +| `MsSqlDataInstaller.cs` | 489 | Kezdeti adatok `SqlBulkCopy`-val | +| `MsSqlDatabaseInstaller.cs` | 357 | CREATE DATABASE, login-ok, role-ok | +| `MsSqlSchemaInstaller.cs` | 197 | Bulk-insert séma metaadatok | +| `MsSqlSchemaWriter.cs` | 113 | Delegálás a schema installernek | +| `MsSqlExclusiveLockDataProvider.cs` | 189 | Exclusive (app) lock-ok | +| `MsSqlSharedLockDataProvider.cs` | 224 | Shared content lock-ok | +| `MsSqlStatisticalDataProvider.cs` | 423 | Aggregáció / statisztika | +| `MsSqlPackagingDataProvider.cs` | 427 | Csomagkezelés | +| `MsSqlAccessTokenDataProvider.cs` | 284 | Token CRUD dinamikus collation-nel | +| `MsSqlClientStoreDataProvider.cs` | 352 | OAuth client/secret tár | +| `MsSqlExtensions.cs` | 72 | DI 
regisztráció | +| `SqlScriptReader.cs` | 53 | Script-ek szétválasztása `GO`-nál | +| `Components/MsSqlExclusiveLockComponent.cs` | ~50 | SnComponent patch | +| `Components/MsSqlStatisticsComponent.cs` | ~50 | SnComponent patch | +| `Components/MsSqlClientStoreComponent.cs` | ~60 | SnComponent patch | + +### BlobStorage MsSql réteg (~1 208 sor) + +| Fájl | Sorok | Cél | +|------|------:|-----| +| `MsSqlBlobMetaDataProvider.cs` | 677 | Blob metadata CRUD | +| `MsSqlBlobMetaDataProviderScripts.cs` | 216 | SQL scriptek blob műveletekhez | +| `BuiltInBlobProvider.cs` | 254 | `VARBINARY(MAX)` olvasás/írás | +| `MsSqlBlobProviderSelector.cs` | 33 | Provider kiválasztás | +| `MsSqlBlobProviderExtensions.cs` | 28 | DI regisztráció | + +--- + +## 3. MSSQL → PostgreSQL SQL konverziók + +### 3.1 Típus-megfeleltetések + +| MSSQL típus | PostgreSQL típus | Megjegyzés | +|-------------|-----------------|------------| +| `INT IDENTITY(1,1)` | `SERIAL` / `GENERATED ALWAYS AS IDENTITY` | | +| `NVARCHAR(n)` | `VARCHAR(n)` / `TEXT` | PostgreSQL natívan Unicode | +| `NVARCHAR(MAX)` | `TEXT` | | +| `NTEXT` | `TEXT` | | +| `VARBINARY(MAX)` | `BYTEA` | Vagy Large Objects (`lo`) nagy fájloknál | +| `BIT` | `BOOLEAN` | | +| `DATETIME2` | `TIMESTAMP` / `TIMESTAMPTZ` | | +| `TINYINT` | `SMALLINT` | PostgreSQL-ben nincs 1-byte integer | +| `ROWVERSION` / `TIMESTAMP` | Nincs közvetlen megfelelő | → `BIGINT` + trigger, vagy `xmin` system column | +| `MONEY` | `NUMERIC(19,4)` | | +| `IMAGE` | `BYTEA` | | + +### 3.2 SQL szintaxis konverziók (~70 script) + +| MSSQL funkció | PostgreSQL megfelelő | Érintett scriptek száma | +|---------------|---------------------|------------------------| +| `@@IDENTITY` | `RETURNING id` záradék | ~12 | +| `@@ROWCOUNT` | `GET DIAGNOSTICS row_count` | ~5 | +| `COLLATE Latin1_General_CI_AS` | `COLLATE "und-x-icu"` vagy `ILIKE` | ~8 | +| `STRING_SPLIT(value, ',')` | `string_to_array()` + `unnest()` | ~3 | +| `OUTPUT INSERTED.*` | `RETURNING *` | ~6 | +| `OUTPUT 
DELETED.*` | `DELETE ... RETURNING *` | ~2 | +| `GETUTCDATE()` | `NOW() AT TIME ZONE 'UTC'` / `CURRENT_TIMESTAMP` | ~8 | +| `DATEADD(minute, -X, GETUTCDATE())` | `NOW() - INTERVAL 'X minutes'` | ~5 | +| `DATALENGTH()` | `octet_length()` / `length()` | ~4 | +| `LEN()` | `length()` | ~2 | +| `CONVERT(type, expr)` | `CAST(expr AS type)` / `expr::type` | ~5 | +| `BEGIN TRY/CATCH` | `BEGIN...EXCEPTION WHEN` (PL/pgSQL) | ~4 | +| `ERROR_NUMBER()` IN (2601, 2627) | `SQLSTATE = '23505'` (unique_violation) | ~2 | +| `RAISERROR` | `RAISE EXCEPTION` | ~2 | +| `WITH (NOLOCK)` | Eltávolítható (MVCC natív) | ~3 | +| `WITH (TABLOCK)` / `(TABLOCKX)` | `LOCK TABLE ... IN ...` | ~2 | +| `CURSOR DECLARE/FETCH` | PL/pgSQL `FOR rec IN query LOOP` | ~3 | +| `ROW_NUMBER() OVER(...)` CTE | Ugyanaz (standard SQL) ✅ | ~2 | +| `INFORMATION_SCHEMA.TABLES` | Ugyanaz ✅ | ~1 | +| `OBJECT_ID('table', 'U')` | `pg_class` / `to_regclass('table')` | ~2 | +| `sys.tables/columns/identity_columns` | `information_schema.*` / `pg_catalog.*` | ~4 | +| `sys.server_principals/databases` | `pg_roles` / `pg_database` | ~2 | +| `sp_addrolemember` | `GRANT role TO user` | ~1 | +| `TRUNCATE TABLE` | Ugyanaz ✅ | ~2 | +| `GO` batch separator | Nem szükséges | SqlScriptReader kihagyható | +| `PRINT` | `RAISE NOTICE` | ~3 | +| `N'string'` prefix | Nem szükséges (natív Unicode) | ~20+ | +| `TOP n` | `LIMIT n` | ~5 | +| Tábla-változók `DECLARE @t TABLE(...)` | `TEMP TABLE` / CTE | ~2 | +| `LIKE ... ESCAPE '\'` | Ugyanaz ✅ (de `\\` PostgreSQL-ben) | ~3 | + +### 3.3 A `ROWVERSION` probléma — a legnagyobb kihívás + +Az MSSQL `rowversion` (korábbi nevén `timestamp`) egy **automatikusan inkrementálódó 8-byte bináris érték**, amely minden UPDATE-nél változik. A sensenet ezt használja **optimistic concurrency control**-ra: `NodeTimestamp` és `VersionTimestamp`. + +PostgreSQL-ben **nincs közvetlen megfelelő**. 
Lehetséges megoldások: + +| Megoldás | Előny | Hátrány | +|----------|-------|---------| +| `BIGINT` + trigger | Hű viselkedés | Trigger overhead, manuális karbantartás | +| `xmin` system column | Zero overhead | Nem stabil tranzakciók között, nem exportálható | +| Application-level version | Egyszerű | Minden UPDATE-nél manuálisan kell növelni | +| `pg_advisory_lock` | Erős lock | Szemantikailag más | + +**Javasolt megoldás**: `BIGINT` oszlop + `BEFORE UPDATE` trigger, ami automatikusan növeli az értéket: + +```sql +CREATE SEQUENCE global_timestamp_seq; + +CREATE OR REPLACE FUNCTION update_timestamp_column() +RETURNS TRIGGER AS $$ +BEGIN + NEW."Timestamp" = nextval('global_timestamp_seq'); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Alkalmazás minden érintett táblára: +CREATE TRIGGER trg_nodes_timestamp +BEFORE UPDATE ON "Nodes" +FOR EACH ROW EXECUTE FUNCTION update_timestamp_column(); +``` + +### 3.4 Teljes SQL script katalógus + +#### MsSqlDataProviderScripts.cs (~40 script property) + +| Script Property | MSSQL-specifikus elemek | +|----------------|------------------------| +| `InsertNodeAndVersionScript` | `@@IDENTITY`, `DECLARE @table AS TABLE(...)`, `OUTPUT INSERTED.*` | +| `UpdateNodeScript` / `UpdateVersionScript` | rowversion összehasonlítás, `@@ROWCOUNT` | +| `CopyVersionFromScript` | `@@IDENTITY`, rowversion | +| `DeleteNodeScript` | Multi-step FK sorrenddel | +| `MoveNodeScript` | `COLLATE Latin1_General_CI_AS`, `REPLACE()`, `LIKE...ESCAPE '\'` | +| `QueryNodesByTypeAndPathAndNameScript` | `COLLATE Latin1_General_CI_AS`, `TOP`, `LIKE` escape-pel | +| `LoadNodesScript` | Multi-result-set reader, `STRING_SPLIT()`, `NVARCHAR(MAX)` cast | +| `LoadNodeHeadScript` / `LoadNodeHeadByMembersScript` | `STRING_SPLIT()` | +| `GetTreeSizeScript` | `DATALENGTH()`, `SUM()`, CTE-k | +| `LoadNodeHeadsByPathAndNameScript` | `COLLATE Latin1_General_CI_AS` | +| TreeLock scriptek (Acquire, IsLocked, Release, GetAll, Delete) | `GETUTCDATE()`, `DATEADD()`, `@@IDENTITY` 
| +| IndexDocument scriptek (Save, Load) | Standard INSERT/SELECT | +| IndexingActivity scriptek (Load, Register, Update, Delete, GetLastId) | `@@IDENTITY`, CTE-k `ROW_NUMBER() OVER(...)`, `@@TRANCOUNT`, `BEGIN TRAN`/`COMMIT TRAN`, `CURSOR`, `@@FETCH_STATUS`, `RAISERROR` | +| `GetNodeTimestampScript` / `GetVersionTimestampScript` | rowversion | +| `AppModelScript` | `COLLATE Latin1_General_CI_AS` | +| SchemaModification scriptek (Load, Start/Finish) | rowversion, `@@ROWCOUNT` | +| `LoggingScript` (WriteAuditEvent) | `@@IDENTITY`, `GETUTCDATE()` | +| Provider tool scriptek (GetNameOfLastNode, LoadChildTypesToAllow, stb.) | `NOLOCK` hint, `CURSOR`, `WITH (TABLOCK)`, `WITH (TABLOCKX)` | +| `GetDatabaseUsageScript` | `DATALENGTH()`, multi-result-set, MSSQL metadata (`sys.tables`, `sys.identity_columns`) | +| `GetHealthScript` | Standard SELECT | + +#### Szatellit provider SQL (C# fájlokba beágyazva) + +| Provider | SQL műveletek | MSSQL funkciók | +|----------|--------------|----------------| +| ExclusiveLock | Acquire, Refresh, Release, IsLocked, Create Table | `OUTPUT INSERTED.*`, `WITH (NOLOCK)`, `OBJECT_ID('...', 'U')`, `CREATE INDEX ... 
ON [PRIMARY]` | +| SharedLock | Create, Refresh, Modify, Get, Delete, Cleanup | `GETUTCDATE()`, `DATEADD()`, `TRUNCATE TABLE` | +| Statistical | WriteData, LoadAggregation, CleanupAggregation, EnumerateData, WriteAggregation, LoadFirst/LastAggregationTimeStamp, LoadUsagePeriod | `GETUTCDATE()`, `datetime2(7)`, `BEGIN TRY/END TRY BEGIN CATCH/END CATCH` (upsert), `TEXTIMAGE_ON [PRIMARY]` | +| Packaging | LoadInstalled/Incomplete Components, LoadPackages, SavePackage, UpdatePackage, PackageExistence, DeletePackage, DeleteAll, LoadManifest, GetContentPathsWhereTheyAreAllowedChildren | `@@IDENTITY`, `TRUNCATE TABLE` | +| AccessToken | Create, GetById/Value, Exists, GetAll, Update, Delete, DeleteByUser/Content, Cleanup, Create Table | `OUTPUT INSERTED.*`, `@@IDENTITY`, `GETUTCDATE()`, dinamikus collation (`sys.columns`/`sys.tables` → `_CI_` → `_CS_`) | +| ClientStore | GetAll/ById, Upsert, Delete, DeleteByHost, SaveSecret, Create Table | `BEGIN TRY INSERT / END TRY BEGIN CATCH IF ERROR_NUMBER() IN (2601, 2627) UPDATE END CATCH` (upsert), `FOREIGN KEY`, komplex index definíciók | + +#### BlobStorage SQL (MsSqlBlobMetaDataProviderScripts.cs) + +| Művelet | MSSQL funkciók | +|---------|----------------| +| InsertBinaryProperty | `@@IDENTITY`, `UPDATE...OUTPUT DELETED.*` | +| DeleteBinaryProperty | Standard DELETE | +| InsertStagingBinary | `@@IDENTITY`, `CONVERT(varbinary, '')` | +| UpdateStream / WriteStagingChunk | `VARBINARY(MAX)` kezelés | +| CommitChunk | `DATALENGTH()` | + +--- + +## 4. 
C# kód változások + +### 4.1 NuGet csomagok + +| Jelenlegi (MSSQL) | Szükséges (PostgreSQL) | +|-------------------|----------------------| +| `Microsoft.Data.SqlClient` | `Npgsql` (≥8.0) | +| — | `Npgsql.EntityFrameworkCore.PostgreSQL` (opcionális) | + +### 4.2 ADO.NET osztály-megfeleltetések + +| MSSQL (`Microsoft.Data.SqlClient`) | PostgreSQL (`Npgsql`) | +|------------------------------------|----------------------| +| `SqlConnection` | `NpgsqlConnection` | +| `SqlCommand` | `NpgsqlCommand` | +| `SqlDataReader` | `NpgsqlDataReader` | +| `SqlParameter` | `NpgsqlParameter` | +| `SqlTransaction` | `NpgsqlTransaction` | +| `SqlException` | `NpgsqlException` / `PostgresException` | +| `SqlBulkCopy` | Nincs → `COPY` command / `NpgsqlBinaryImporter` | +| `SqlConnectionStringBuilder` | `NpgsqlConnectionStringBuilder` | +| `SqlDbType` | `NpgsqlDbType` | + +### 4.3 SqlBulkCopy → PostgreSQL COPY + +Az `MsSqlSchemaInstaller` és `MsSqlDataInstaller` `SqlBulkCopy`-t használnak tömeges adatbetöltéshez. 
PostgreSQL-ben ennek megfelelője: + +```csharp +// MSSQL: +using var bulkCopy = new SqlBulkCopy(connection, + SqlBulkCopyOptions.TableLock | SqlBulkCopyOptions.KeepIdentity, transaction); +bulkCopy.DestinationTableName = "Nodes"; +bulkCopy.WriteToServer(dataTable); + +// PostgreSQL: +await using var writer = await connection.BeginBinaryImportAsync( + "COPY \"Nodes\" (\"NodeId\", \"NodeTypeId\", \"Name\", \"Path\") FROM STDIN (FORMAT BINARY)"); +foreach (var row in data) { + await writer.StartRowAsync(); + await writer.WriteAsync(row.NodeId, NpgsqlDbType.Integer); + await writer.WriteAsync(row.NodeTypeId, NpgsqlDbType.Integer); + await writer.WriteAsync(row.Name, NpgsqlDbType.Varchar); + await writer.WriteAsync(row.Path, NpgsqlDbType.Varchar); +} +await writer.CompleteAsync(); +``` + +### 4.4 Hibakezelés konverzió + +```csharp +// MSSQL: +catch (SqlException ex) when (ex.Number == 2627) { /* unique violation */ } +catch (SqlException ex) when (ex.Number == 1205) { /* deadlock */ } +catch (SqlException ex) when (ex.Number == 4060 || ex.Number == 233) { /* db not ready */ } + +// PostgreSQL: +catch (PostgresException ex) when (ex.SqlState == "23505") { /* unique violation */ } +catch (PostgresException ex) when (ex.SqlState == "40P01") { /* deadlock */ } +catch (NpgsqlException ex) when (ex.InnerException is SocketException) { /* db not ready */ } +``` + +### 4.5 Blob kezelés + +Az MSSQL `VARBINARY(MAX)` → PostgreSQL `BYTEA` konverzió viszonylag egyszerű kis fájloknál, de nagy fájloknál (>1GB) a PostgreSQL Large Objects API-t érdemes használni: + +```csharp +// MSSQL (BuiltInBlobProvider): +cmd.CommandText = "SELECT Stream FROM Files WHERE FileId = @Id"; +reader.GetBytes(0, offset, buffer, 0, count); + +// PostgreSQL (kis fájlok - BYTEA): +cmd.CommandText = "SELECT \"Stream\" FROM \"Files\" WHERE \"FileId\" = @id"; +// GetBytes() működik BYTEA-val is Npgsql-ben + +// PostgreSQL (nagy fájlok - Large Objects): +var manager = new NpgsqlLargeObjectManager(connection); 
+using var stream = await manager.OpenReadAsync(oid); +``` + +### 4.6 MSSQL-specifikus C# API használat a jelenlegi kódban + +| Típus | Használva itt | +|-------|---------------| +| `SqlConnection` | MsSqlDataContext, MsSqlDatabaseInstaller, MsSqlDataInstaller, MsSqlSchemaInstaller, BuiltInBlobProvider | +| `SqlCommand` | MsSqlDataContext, MsSqlDatabaseInstaller, MsSqlDataInstaller, MsSqlSchemaInstaller | +| `SqlDataReader` | MsSqlDataContext | +| `SqlParameter` | MsSqlDataContext, MsSqlSchemaInstaller | +| `SqlConnectionStringBuilder` | MsSqlDatabaseInstaller | +| `SqlTransaction` | MsSqlDataContext, MsSqlDatabaseInstaller | +| `SqlBulkCopy` | MsSqlDataInstaller, MsSqlSchemaInstaller | +| `SqlException` | MsSqlDataProvider, MsSqlDatabaseInstaller, **SnDataContext** ⚠️, **RelationalDataProviderBase** ⚠️ | +| `SqlInfoMessageEventArgs` | MsSqlDataContext (overloaded), MsSqlStatisticalDataProvider | +| `CommandBehavior` | MsSqlDataContext (SequentialAccess) | + +--- + +## 5. Adatbázis-séma konverzió + +### 5.1 Adatbázis-séma (jelenlegi MSSQL) + +#### Core táblák (MsSqlInstall_Schema.sql) + +| Tábla | Kulcs oszlopok / Megjegyzések | +|-------|-------------------------------| +| `Nodes` | `NodeId` (PK, IDENTITY), `Name`, `Path` (NVARCHAR 450, unique ix), `Timestamp` (rowversion) | +| `Versions` | `VersionId` (PK, IDENTITY), `NodeId` (FK→Nodes), `MajorNumber`, `MinorNumber`, `ChangedData` (NTEXT), `Timestamp` (rowversion) | +| `BinaryProperties` | `BinaryPropertyId` (PK, IDENTITY), `VersionId` (FK→Versions), `FileId` (FK→Files) | +| `Files` | `FileId` (PK, IDENTITY), `ContentType`, `FileNameWithoutExtension`, `Extension`, `Size` (BIGINT), `Stream` (VARBINARY(MAX)), `Staging` (BIT) | +| `LongTextProperties` | `LongTextPropertyId`, `VersionId`, `PropertyTypeId`, `Value` (NTEXT) | +| `ReferenceProperties` | `ReferencePropertyId`, `VersionId`, `PropertyTypeId`, `ReferredNodeId` | +| `IndexingActivities` | `IndexingActivityId` (PK, IDENTITY), `ActivityType`, `CreationDate`, 
`RunningState`, `NodeId`, `VersionId`, `Path` | +| `TreeLocks` | `TreeLockId`, `Path`, `LockedAt` | +| `LogEntries` | `LogId` (PK, IDENTITY), `EventId`, `Category`, `Priority`, `FormattedMessage`, `Title`, stb. | +| `Packages` | `Id` (PK, IDENTITY), `PackageType`, `ComponentId`, `ComponentVersion`, `Description`, `Manifest` | +| `SchemaModification` | `SchemaModificationId`, `ModificationDate`, `Timestamp` (rowversion) | +| `JournalItems` | `Id`, `When`, `Wherewith`, `What` | +| `WorkflowNotification` | `NotificationId`, `NodeId`, `WorkflowInstanceId`, `WorkflowNodePath` | + +#### Security táblák (MsSqlInstall_Security.sql) + +| Tábla | Kulcs oszlopok | +|-------|----------------| +| `EFEntities` | `Id` (PK), `OwnerId`, `ParentId` (FK→self), `IsInherited` | +| `EFEntries` | `EFEntityId` (FK), `EntryType`, `IdentityId`, `LocalOnly`, összetett PK | +| `EFMemberships` | `GroupId`, `MemberId`, `IsUser`, összetett PK | +| `EFMessages` | `Id` (PK, IDENTITY), `SavedBy`, `SavedAt`, `ExecutionState`, `LockedBy`, `LockedAt`, `Body` | + +#### Szatellit táblák (Component patch-ek hozzák létre) + +| Tábla | Létrehozva | +|-------|-----------| +| `ExclusiveLocks` | MsSqlExclusiveLockComponent | +| `StatisticalUsage` | MsSqlStatisticsComponent | +| `ClientApps` | MsSqlClientStoreComponent | +| `ClientSecrets` | MsSqlClientStoreComponent | +| `AccessTokens` | MsSqlAccessTokenDataProvider (inline) | +| `SharedLocks` | MsSqlSharedLockDataProvider (inline) | + +#### View-k + +- `NodeInfoView` — Nodes ↔ Versions ↔ NodeTypes join +- `ReferencesInfoView` — ReferenceProperties ↔ PropertyTypes ↔ Nodes join +- `PermissionInfoView` — EFEntries ↔ EFEntities ↔ Nodes join +- `MembershipInfoView` — EFMemberships ↔ Nodes join + +#### Adatbázis-szintű beállítások (MsSqlDatabaseInstaller alkalmazása) + +A telepítő 20+ `ALTER DATABASE` utasítást futtat, többek között: +`ALLOW_SNAPSHOT_ISOLATION ON`, `READ_COMMITTED_SNAPSHOT ON`, `RECOVERY SIMPLE`, `PARAMETERIZATION FORCED`, `PAGE_VERIFY CHECKSUM`, 
`AUTO_CREATE_STATISTICS ON`, `AUTO_UPDATE_STATISTICS ON`, `AUTO_SHRINK OFF`. + +### 5.2 Fő táblák PostgreSQL átalakítása (példa) + +```sql +-- MSSQL: +CREATE TABLE [Nodes] ( + [NodeId] INT IDENTITY (1, 1) NOT NULL, + [NodeTypeId] INT NOT NULL, + [Name] NVARCHAR(450) NOT NULL, + [Path] NVARCHAR(450) NOT NULL, + [Timestamp] ROWVERSION NOT NULL, + [IsDeleted] TINYINT NULL, + ... + CONSTRAINT [PK_Nodes] PRIMARY KEY CLUSTERED ([NodeId]) +); + +-- PostgreSQL: +CREATE SEQUENCE global_timestamp_seq; + +CREATE TABLE "Nodes" ( + "NodeId" SERIAL PRIMARY KEY, + "NodeTypeId" INTEGER NOT NULL, + "Name" VARCHAR(450) NOT NULL, + "Path" VARCHAR(450) NOT NULL, + "Timestamp" BIGINT NOT NULL DEFAULT nextval('global_timestamp_seq'), + "IsDeleted" SMALLINT NULL, + ... +); + +CREATE TRIGGER trg_nodes_timestamp +BEFORE UPDATE ON "Nodes" +FOR EACH ROW EXECUTE FUNCTION update_timestamp_column(); +``` + +### 5.3 Elnevezési konvenció + +Az MSSQL PascalCase konvenciót használ (`NodeId`, `NodeTypeId`), míg PostgreSQL-ben a konvenció snake_case (`node_id`, `node_type_id`). **Két lehetőség:** + +1. **Megtartani a PascalCase-t** idézőjelekkel (`"NodeId"`) — egyszerűbb migráció, de nem idiomatikus +2. **Átírni snake_case-re** — idiomatikus, de az összes C# kódban is módosítani kell az oszlopneveket + +**Javasolt**: Megtartani a PascalCase-t idézőjelekkel a kompatibilitás érdekében, mivel ~70 SQL scriptben hivatkoznak rá, és a `RelationalDataProviderBase` is használja az oszlopneveket a result set olvasásakor. 
+ +### 5.4 PostgreSQL adatbázis-szintű beállítások (MSSQL megfelelők) + +| MSSQL beállítás | PostgreSQL megfelelő | +|----------------|---------------------| +| `ALLOW_SNAPSHOT_ISOLATION ON` | Nincs szükség rá — PostgreSQL natívan MVCC | +| `READ_COMMITTED_SNAPSHOT ON` | Alap viselkedés PostgreSQL-ben | +| `RECOVERY SIMPLE` | `wal_level = minimal` (nem ajánlott prodban) | +| `PARAMETERIZATION FORCED` | `plan_cache_mode = force_generic_plan` (PG14+) | +| `PAGE_VERIFY CHECKSUM` | `data_checksums = on` (initdb-nél) | +| `AUTO_CREATE_STATISTICS ON` | Alapértelmezett — `autovacuum` kezeli | +| `AUTO_UPDATE_STATISTICS ON` | Alapértelmezett | +| `AUTO_SHRINK OFF` | Nincs ilyen koncepció | + +--- + +## 6. Az absztrakt réteg szükséges javításai + +### 6.1 `SnDataContext` — SqlException szivárgás megszüntetése + +```csharp +// Jelenlegi (hibás - MSSQL-specifikus az abstract base-ben): +protected virtual bool ShouldRetryOnError(Exception ex) +{ + return (ex is SqlException && ex.Message.Contains("A network-related...")); +} + +// Javított: +protected abstract bool ShouldRetryOnError(Exception ex); +// Vagy: +protected virtual bool ShouldRetryOnError(Exception ex) => false; +``` + +### 6.2 `RelationalDataProviderBase` — `IsDatabaseReady()` javítás + +```csharp +// Jelenlegi (hibás): +catch (SqlException ex) when (ex.Number == 4060 || ex.Number == 233) { ... } + +// Javított: +catch (Exception ex) when (IsDatabaseNotReadyException(ex)) { ... } +protected abstract bool IsDatabaseNotReadyException(Exception ex); +``` + +### 6.3 `Microsoft.Data.SqlClient` eltávolítása a Common projektből + +A `SenseNet.Common.csproj`-ból ki kell venni a `Microsoft.Data.SqlClient` függőséget, és az `MsSqlDataContext`-et (ami jelenleg itt él!) át kell helyezni a `ContentRepository.MsSql` projektbe. + +### 6.4 Tranzakció-kezelési minták + +A jelenlegi kódban a tranzakciókezelés három szinten történik: + +1. 
**SnDataContext-managed tranzakciók**: A legtöbb provider `BeginTransaction()` → műveletek → `Commit()`. A context `SqlTransaction`-t wrappel. +2. **SQL-szintű tranzakciók**: Egyes scriptek `BEGIN TRAN` / `COMMIT TRAN` inline-t használnak (IndexingActivity register/update scriptek) `@@TRANCOUNT` ellenőrzéssel. +3. **SqlBulkCopy tranzakciók**: `MsSqlSchemaInstaller` explicit `SqlTransaction`-t hoz létre és átadja a `SqlBulkCopy`-nak. + +### 6.5 Retry logika + +A `MsSqlDataContext` connection pool hibáknál és SQL Server hálózati hibáknál retry-ol az `IRetrier`-en keresztül (`SqlException` ellenőrzéssel). + +### 6.6 Upsert minták + +Két megközelítés a jelenlegi kódban: +- `BEGIN TRY INSERT / END TRY BEGIN CATCH UPDATE END CATCH` az `ERROR_NUMBER()` ellenőrzéssel (ClientStore, Statistical) +- `OUTPUT INSERTED.*` az UPDATE...FROM subquery-vel (ExclusiveLock acquire) + +PostgreSQL-ben mindkettő egyszerűsíthető: +```sql +INSERT INTO ... ON CONFLICT (...) DO UPDATE SET ... +RETURNING *; +``` + +### 6.7 Connection string kezelés + +- `MsSqlDataContext` a konstruktoron keresztül kapja a connection stringet és `SqlConnectionStringBuilder`-t használ. +- `MsSqlDatabaseInstaller` `SqlConnectionStringBuilder`-rel **lecseréli az initial catalog-ot** (átlép `master` db-re a CREATE DATABASE-hez, majd vissza a cél db-re). +- A connection string `IOptions` DI-on keresztül van konfigurálva. + +### 6.8 Stored Procedures + +**Nincs felhasználó által definiált stored procedure** a kódbázisban. Minden SQL inline. Viszont: +- `sp_addrolemember` (rendszer SP) hívás a `MsSqlDatabaseInstaller`-ben a `db_owner` role kiosztásához. + +--- + +## 7. 
Munkaterv és becslés + +### Fázis 1: Absztrakt réteg tisztítása (előfeltétel) + +| Feladat | Becsült idő | +|---------|------------| +| `SnDataContext` MSSQL szivárgás javítása | 2-3 nap | +| `RelationalDataProviderBase` MSSQL szivárgás javítása | 2-3 nap | +| `MsSqlDataContext` áthelyezése Common → MsSql projektbe | 1-2 nap | +| BlobStorage regisztráció generikussá tétele | 1 nap | +| Connection string key generikussá tétele | 0.5 nap | +| Tesztek futtatása, regresszió ellenőrzés | 2-3 nap | +| **Fázis 1 összesen** | **~9-12 nap** | + +### Fázis 2: PostgreSQL provider core + +| Feladat | Becsült idő | +|---------|------------| +| `PgSqlDataContext` implementálása (Npgsql) | 2-3 nap | +| DDL scriptek átírása (CREATE TABLE, indexek, FK-k) | 3-5 nap | +| ROWVERSION pótlása (sequence + trigger) | 2-3 nap | +| `PgSqlDataProvider` + 70 SQL script átírása | 8-12 nap | +| `PgSqlDataInstaller` (COPY-alapú bulk insert) | 2-3 nap | +| `PgSqlDatabaseInstaller` (CREATE DATABASE PG-módra) | 1-2 nap | +| **Fázis 2 összesen** | **~18-28 nap** | + +### Fázis 3: Szatellit providerek + +| Feladat | Becsült idő | +|---------|------------| +| `PgSqlExclusiveLockDataProvider` | 1-2 nap | +| `PgSqlSharedLockDataProvider` | 1-2 nap | +| `PgSqlStatisticalDataProvider` | 2-3 nap | +| `PgSqlPackagingDataProvider` | 2-3 nap | +| `PgSqlAccessTokenDataProvider` | 1-2 nap | +| `PgSqlClientStoreDataProvider` | 1-2 nap | +| **Fázis 3 összesen** | **~8-14 nap** | + +### Fázis 4: Blob storage + +| Feladat | Becsült idő | +|---------|------------| +| `PgSqlBlobMetaDataProvider` | 2-3 nap | +| `PgSqlBuiltInBlobProvider` (BYTEA/Large Objects) | 3-5 nap | +| Chunked upload/download tesztelés | 2-3 nap | +| **Fázis 4 összesen** | **~7-11 nap** | + +### Fázis 5: Tesztelés és integráció + +| Feladat | Becsült idő | +|---------|------------| +| Test platform létrehozása (`PgSqlPlatform`) | 1-2 nap | +| Integrációs tesztek futtatása és javítás | 5-10 nap | +| Docker compose PostgreSQL-lel | 1-2 nap | +| 
Teljesítmény tesztelés | 3-5 nap | +| CI/CD pipeline bővítés | 1-2 nap | +| **Fázis 5 összesen** | **~11-21 nap** | + +### 📊 Összesítés + +| | Optimista | Pesszimista | +|--|-----------|-------------| +| **Teljes idő** | **~53 nap** (~10.5 hét) | **~86 nap** (~17 hét) | +| **Érintett/új fájlok** | **~29 fájl** | | +| **Érintett kódsorok** | **~12 500 sor** | | + +--- + +## 8. Kockázatok és kihívások + +| Kockázat | Súlyosság | Mitigáció | +|----------|-----------|-----------| +| `ROWVERSION` pótlása nem triviális — race condition lehetőség | 🔴 Magas | Alapos tesztelés, `pg_advisory_lock` fallback | +| `COLLATE` viselkedés eltér (case-insensitive keresés) | 🟡 Közepes | `CITEXT` extension vagy `ILIKE` használata | +| `SqlBulkCopy` → `COPY` átalakítás nem 1:1 | 🟡 Közepes | `NpgsqlBinaryImporter` jól dokumentált | +| Blob kezelés nagy fájloknál (>1GB) | 🟡 Közepes | Large Objects API | +| Absztrakt réteg módosítása megtörheti az MSSQL providert | 🔴 Magas | Regressziós tesztek futtatása minden lépésnél | +| Tranzakció-kezelés eltérései (nested transactions) | 🟡 Közepes | PostgreSQL `SAVEPOINT` használata | +| A ~70 SQL script manuális konverziója hibalehetőség | 🔴 Magas | Script-enkénti unit tesztek | +| `STRING_SPLIT()` és egyéb MSSQL 2016+ funkciók | 🟢 Alacsony | PostgreSQL-ben natív alternatívák vannak | +| Az InMemory provider nem fedi le az összes relációs edge-case-t | 🟡 Közepes | PostgreSQL-specifikus integrációs tesztek kellenek | +| MSSQL-specifikus error code-ok (deadlock 1205, unique 2627, stb.) | 🟡 Közepes | PostgreSQL SQLSTATE kódok jól dokumentáltak | + +--- + +## 9. 
Javasolt projektstruktúra + +``` +src/ +├── ContentRepository.MsSql/ ← meglévő (változatlan) +│ ├── MsSqlDataProvider.cs +│ ├── MsSqlDataProviderScripts.cs +│ ├── MsSqlDataContext.cs ← ide áthelyezve Common-ból (Fázis 1) +│ ├── MsSqlDataInstaller.cs +│ ├── MsSqlDatabaseInstaller.cs +│ ├── MsSqlSchemaInstaller.cs +│ ├── MsSqlSchemaWriter.cs +│ ├── MsSqlExclusiveLockDataProvider.cs +│ ├── MsSqlSharedLockDataProvider.cs +│ ├── MsSqlStatisticalDataProvider.cs +│ ├── MsSqlPackagingDataProvider.cs +│ ├── MsSqlAccessTokenDataProvider.cs +│ ├── MsSqlClientStoreDataProvider.cs +│ ├── MsSqlExtensions.cs +│ ├── SqlScriptReader.cs +│ ├── Components/ +│ └── Scripts/ +│ +├── ContentRepository.PostgreSql/ ← ÚJ PROJEKT +│ ├── PgSqlDataProvider.cs +│ ├── PgSqlDataProviderScripts.cs +│ ├── PgSqlDataContext.cs +│ ├── PgSqlDataInstaller.cs +│ ├── PgSqlDatabaseInstaller.cs +│ ├── PgSqlSchemaInstaller.cs +│ ├── PgSqlSchemaWriter.cs +│ ├── PgSqlExclusiveLockDataProvider.cs +│ ├── PgSqlSharedLockDataProvider.cs +│ ├── PgSqlStatisticalDataProvider.cs +│ ├── PgSqlPackagingDataProvider.cs +│ ├── PgSqlAccessTokenDataProvider.cs +│ ├── PgSqlClientStoreDataProvider.cs +│ ├── PgSqlExtensions.cs +│ ├── Components/ +│ │ ├── PgSqlExclusiveLockComponent.cs +│ │ ├── PgSqlStatisticsComponent.cs +│ │ └── PgSqlClientStoreComponent.cs +│ ├── Scripts/ +│ │ ├── Create_SenseNet_PostgreSql_Database.sql +│ │ └── PgSqlInstall_Security.sql +│ └── SenseNet.ContentRepository.PostgreSql.csproj +│ +├── BlobStorage/ +│ └── Data/ +│ ├── MsSqlClient/ ← meglévő +│ │ ├── MsSqlBlobMetaDataProvider.cs +│ │ ├── BuiltInBlobProvider.cs +│ │ └── ... 
+│ └── PgSqlClient/ ← ÚJ +│ ├── PgSqlBlobMetaDataProvider.cs +│ ├── PgSqlBlobMetaDataProviderScripts.cs +│ ├── PgSqlBuiltInBlobProvider.cs +│ ├── PgSqlBlobProviderSelector.cs +│ └── PgSqlBlobProviderExtensions.cs +│ +├── Common/ +│ └── Storage/Data/ +│ └── SnDataContext.cs ← javítva (SqlException eltávolítva) +│ +├── Storage/ +│ └── Data/ +│ └── RelationalDataProviderBase.cs ← javítva (SqlException eltávolítva) +│ +└── Tests/ + └── SenseNet.IntegrationTests.PostgreSql/ ← ÚJ + ├── Platforms/ + │ └── PgSqlPlatform.cs + └── ... +``` + +--- + +## 10. Összefoglalás + +A PostgreSQL provider létrehozásához **~12 500 sor kódot** kell írni/átírni **~29 fájlban**, plusz javítani az absztrakt réteg MSSQL-szivárgásait. A legnagyobb kihívások: + +1. **70 SQL script** átírása PostgreSQL dialektusra (különösen az `@@IDENTITY` → `RETURNING`, `ROWVERSION` pótlása, és a `COLLATE` kezelés) +2. **Az absztrakt réteg megtisztítása** az MSSQL-specifikus kódrészletektől (`SqlException`, `MsSqlDataContext` áthelyezése) +3. **A blob storage** PostgreSQL-re portolása (BYTEA vs Large Objects döntés) +4. **Integrációs tesztelés** — a meglévő teszt-infrastruktúra jól felépített, egy `PgSqlPlatform` létrehozásával a legtöbb teszt futtatható lesz + +A projekt **reálisan 12-17 hét** egy tapasztalt fejlesztő számára, az **előfeltétel** az absztrakt réteg megtisztítása, ami önmagában ~2 hét. 
+ +### Pozitívumok + +- ✅ Az architektúra **alapvetően jól strukturált** — a `DataProvider` → `RelationalDataProviderBase` → konkrét provider hierarchia működik +- ✅ Minden SQL **inline string**, nem stored procedure — könnyű megtalálni és átírni +- ✅ A tesztelési infrastruktúra (`IPlatform`, `IntegrationTest`) jól támogatja az új platformok hozzáadását +- ✅ Az `InMemoryDataProvider` precedenst teremt alternatív provider implementálására +- ✅ PostgreSQL-ben az MSSQL-specifikus funkciók szinte mindegyikére van natív alternatíva + +### Negatívumok + +- ❌ Az absztrakt réteg (`SnDataContext`, `RelationalDataProviderBase`) **MSSQL-szivárgást** tartalmaz +- ❌ A `MsSqlDataContext` a **Common** projektben él, nem az MsSql projektben +- ❌ A `ROWVERSION` pótlása **nem triviális** és potenciálisan race condition-ökhöz vezethet +- ❌ A blob storage réteg **szorosan csatolt** az MSSQL-hez +- ❌ Nincs egységes migrációs keretrendszer — a séma evolúció Component patch-eken keresztül történik, mindegyik saját DDL-lel diff --git a/docs/notes/teljesitmeny-osszehasonlitas.md b/docs/notes/teljesitmeny-osszehasonlitas.md new file mode 100644 index 000000000..9ca612353 --- /dev/null +++ b/docs/notes/teljesitmeny-osszehasonlitas.md @@ -0,0 +1,283 @@ +# ⚡ PostgreSQL vs MSSQL teljesítmény-összehasonlítás a sensenet kontextusában + +## Összefoglaló + +A PostgreSQL provider bevezetése **önmagában nem garantál jobb teljesítményt**. A sensenet workload jellemzői (hierarchikus fa, sok egyedi kérés, blob kezelés, optimistic concurrency) alapján vannak területek ahol a PostgreSQL **jobb**, és vannak ahol **rosszabb** lenne. Az alábbiakban részletesen elemezzük. + +--- + +## 1. 
Gyors válasz + +| Szempont | PostgreSQL | MSSQL | Megjegyzés | +|----------|:----------:|:-----:|------------| +| **Általános OLTP** | ≈ | ≈ | Érdemi különbség nincs modern verziókban | +| **Párhuzamos olvasás** | ✅ Jobb | — | MVCC natív, nincs lock escalation | +| **Hierarchikus lekérdezések** | ✅ Jobb | — | Recursive CTE optimalizáltabb | +| **JSON/dinamikus tartalom** | ✅ Jobb | — | `jsonb` natív indexeléssel | +| **Blob kezelés (nagy fájlok)** | — | ✅ Jobb | `FILESTREAM` > `BYTEA`/Large Objects | +| **Bulk insert** | — | ✅ Jobb | `SqlBulkCopy` > `COPY` kis adatnál | +| **Connection pooling** | ✅ Jobb* | — | *PgBouncer-rel, natívan nem | +| **ROWVERSION (optimistic concurrency)** | — | ✅ Jobb | Natív vs. emulált (trigger) | +| **Full-text search** | ≈ | ≈ | Mindkettő képes, más megközelítés | +| **Licencköltség** | ✅ Ingyenes | — | Enterprise licenc drága | +| **Windows integráció** | — | ✅ Jobb | AD auth, SSMS, stb. | + +**Összesítve**: A sensenet tipikus workloadjánál **±10-15% eltérés** várható egyik vagy másik irányba, a konkrét művelettől függően. + +--- + +## 2. Ahol a PostgreSQL jobb lenne + +### 2.1 MVCC és párhuzamos olvasás (🟢 Jelentős előny) + +Az MSSQL alapértelmezetten **pessimistic locking**-ot használ (`READ COMMITTED` lock-based isolation). A sensenet kódjában ezért találunk `WITH (NOLOCK)` hinteket: + +```sql +-- Jelenlegi MSSQL: +SELECT * FROM Nodes WITH (NOLOCK) WHERE Path = @Path +``` + +A PostgreSQL **natívan MVCC-alapú** — az olvasók **soha nem blokkolják** az írókat és fordítva. Ez azt jelenti: + +- **A `WITH (NOLOCK)` hintekre nincs szükség** (és nincs dirty read kockázat) +- **Nagy terhelés alatt kevesebb lock contention** → kevesebb várakozás +- **A connection pool probléma enyhülhet**, mert a connection-ök rövidebb ideig foglaltak + +**Várható hatás**: Nagy párhuzamos olvasási terhelésnél (pl. sok felhasználó egyszerre böngészi a tartalom fát) **10-30% javulás a válaszidőben**. 
+ +### 2.2 Hierarchikus lekérdezések (🟢 Közepes előny) + +A sensenet egy **fa struktúrájú tartalomkezelő** — a `Path` alapú lekérdezések kritikusak. PostgreSQL-ben a recursive CTE-k **jobban optimalizáltak**: + +```sql +-- Recursive CTE teljesítmény: +-- PostgreSQL: natív work table optimalizáció, cycle detection +-- MSSQL: hasonló, de a PostgreSQL optimizer gyakran jobb tervet választ fa-bejárásnál +WITH RECURSIVE subtree AS ( + SELECT node_id, path, parent_node_id FROM nodes WHERE path = '/Root' + UNION ALL + SELECT n.node_id, n.path, n.parent_node_id + FROM nodes n JOIN subtree s ON n.parent_node_id = s.node_id +) +SELECT * FROM subtree; +``` + +Emellett a PostgreSQL `ltree` extension **kifejezetten hierarchikus adatokra** lett tervezve: + +```sql +-- ltree extension (opcionális, de nagy előny lenne): +CREATE EXTENSION ltree; +ALTER TABLE nodes ADD COLUMN path_ltree ltree; +-- Ezután: +SELECT * FROM nodes WHERE path_ltree <@ 'Root.Content.Documents'; +-- GiST index-szel ez O(log n) a LIKE '/Root/Content/Documents%' O(n)-je helyett +``` + +**Várható hatás**: Mély fa lekérdezéseknél **20-50% javulás**, de csak ha az `ltree` extension-t is kihasználjuk. 
+ +### 2.3 Connection pooling PgBouncer-rel (🟢 Jelentős előny) + +A PostgreSQL ökoszisztémában a **PgBouncer** egy érett, dedikált connection pooler: + +``` +Alkalmazás (1000 connection) → PgBouncer (50 pooled) → PostgreSQL (50 backend) +``` + +| Tulajdonság | ADO.NET SqlConnection pool | PgBouncer | +|-------------|---------------------------|-----------| +| Típus | In-process | Külső process | +| Transaction pooling | ❌ | ✅ | +| Statement pooling | ❌ | ✅ | +| Multi-app pooling | ❌ (app-onként külön pool) | ✅ (közös pool) | +| Connection limit kontroll | Csak per connection string | Globális | +| Monitoring | `dotnet-counters` | Dedikált admin konzol | + +**Várható hatás**: A [connection pool kimerülés](connection-pool-kimeriules-elemzes.md) probléma **nagyrészt megoldódna** PgBouncer transaction pooling módban, mert a connection a tranzakció végén azonnal visszakerülne a pool-ba, nem a `SnDataContext.Dispose()` hívásakor. + +### 2.4 Partícionálás (🟢 Hosszú távú előny) + +Nagy adatbázisoknál a PostgreSQL **deklaratív partícionálása** egyszerűbb: + +```sql +-- PostgreSQL natív partícionálás: +CREATE TABLE versions ( + version_id INTEGER, + node_id INTEGER, + creation_date TIMESTAMPTZ +) PARTITION BY RANGE (creation_date); + +CREATE TABLE versions_2024 PARTITION OF versions + FOR VALUES FROM ('2024-01-01') TO ('2025-01-01'); +CREATE TABLE versions_2025 PARTITION OF versions + FOR VALUES FROM ('2025-01-01') TO ('2026-01-01'); +``` + +Az MSSQL-ben ez **Enterprise Edition** funkció (partition function + partition scheme), ami **jelentős licencköltséget** jelent. + +--- + +## 3. Ahol az MSSQL jobb marad + +### 3.1 ROWVERSION / optimistic concurrency (🔴 MSSQL egyértelműen jobb) + +A sensenet **intenzíven használja** a `ROWVERSION`-t (`NodeTimestamp`, `VersionTimestamp`) az optimistic concurrency controlhoz. 
Az MSSQL-ben ez: + +- **Zero-overhead** — a storage engine automatikusan kezeli +- **Garantáltan monoton növekvő** — adatbázis szinten +- **Lock-free** — nem kell trigger, nem kell sequence + +PostgreSQL-ben ezt **emulálni kell** (`BIGINT` + trigger + sequence), ami: + +- **Trigger overhead**: minden UPDATE-nél fut a trigger → **~5-15% lassulás** UPDATE-intenzív munkaterheléseknél +- **Sequence contention**: nagy párhuzamosságnál a `nextval()` szűk keresztmetszet lehet +- **Nem atomi**: a trigger és az UPDATE nem garantáltan atomi (bár PostgreSQL-ben a trigger a tranzakción belül fut, tehát ez kezelhető) + +**Várható hatás**: Content szerkesztés-intenzív workload-nál **5-15% lassulás** az MSSQL-hez képest. + +### 3.2 Blob kezelés nagy fájloknál (🔴 MSSQL jobb) + +Az MSSQL `FILESTREAM` / `FILETABLE` natívan támogatja a nagy fájlok fájlrendszeri tárolását: + +| Szempont | MSSQL FILESTREAM | PostgreSQL BYTEA | PostgreSQL Large Objects | +|----------|-----------------|------------------|------------------------| +| Max méret | 2 TB (fájlrendszer limit) | 1 GB (BYTEA limit) | 4 TB | +| Streaming | ✅ Win32 API | ❌ Teljes betöltés | ✅ De API bonyolultabb | +| Backup | ✅ Integrált | ✅ | ⚠️ Külön `pg_dump -b` | +| WAL terhelés | Alacsony | 🔴 Magas (teljes blob WAL-ba) | Közepes | +| Teljesítmény >100MB | ✅ Kiváló | 🔴 Gyenge | 🟡 Közepes | + +**Várható hatás**: Ha a sensenet **nagy fájlokat** kezel (videók, képek >10MB), a PostgreSQL **20-50% lassabb** lehet a blob műveleteknél, különösen a WAL terhelés miatt. + +> **Megjegyzés**: A sensenet már most is támogat külső blob provider-eket (Azure Blob Storage, stb.), ami ezt a problémát megkerüli. Ha külső blob storage-ot használnak, ez a pont irreleváns. 
+ +### 3.3 Bulk insert teljesítmény (🟡 MSSQL kicsit jobb) + +| Művelet | MSSQL `SqlBulkCopy` | PostgreSQL `COPY` | +|---------|---------------------|-------------------| +| Kis adatmennyiség (<1000 sor) | ✅ Gyorsabb (kevesebb overhead) | 🟡 Kicsit lassabb | +| Nagy adatmennyiség (>100K sor) | ≈ | ≈ (mindkettő kiváló) | +| `IDENTITY INSERT` | ✅ Natív `KeepIdentity` flag | 🟡 `OVERRIDING SYSTEM VALUE` | +| Minimal logging | ✅ | ✅ (`COPY` is minimálisan logol) | + +**Várható hatás**: Csak telepítéskor és import műveleteknél releváns, **nem éles terhelésnél**. + +### 3.4 Query optimizer (🟡 MSSQL néha jobb) + +Az MSSQL query optimizer **jobban kezeli a paraméteres lekérdezéseket** (parameter sniffing révén), míg a PostgreSQL generic plan-t választhat, ami nem optimális: + +```sql +-- PostgreSQL-ben a prepared statement 5. futtatás után generic plan-t használ +-- Ez nem mindig optimális ha az adateloszlás egyenetlen +PREPARE get_node(int) AS SELECT * FROM nodes WHERE parent_node_id = $1; +EXECUTE get_node(1); -- 1-5: custom plan, 6+: generic plan +``` + +**Várható hatás**: Egyes lekérdezéseknél **5-20% eltérés** mindkét irányba, de ez finomhangolható. + +--- + +## 4. Ahol nincs érdemi különbség + +| Művelet | Megjegyzés | +|---------|-----------| +| Egyszerű CRUD (INSERT/UPDATE/DELETE/SELECT by PK) | Mindkettő ~1ms alatt | +| Index-alapú keresés | B-tree mindkettőben, hasonló teljesítmény | +| `JOIN` műveletek | Hasonló optimizer stratégiák | +| Tranzakció kezelés | Mindkettő ACID | +| `COUNT(*)` nagy táblákon | Mindkettő lassú index scan nélkül | + +--- + +## 5. 
Teljesítmény-becslés sensenet workload típusonként + +### 5.1 Content böngészés (olvasás-intenzív, ~80% a forgalomból) + +| Metrika | MSSQL (baseline) | PostgreSQL (becslés) | +|---------|:-----------------:|:--------------------:| +| Egyszerű node lekérdezés | 100% | ~100% | +| Fa-bejárás (gyerekek listázása) | 100% | ~110-130% ✅ | +| Párhuzamos olvasás (50+ user) | 100% | ~115-130% ✅ | +| Path-alapú keresés | 100% | ~100-120% ✅ | +| **Átlagos olvasási teljesítmény** | **100%** | **~105-120%** ✅ | + +### 5.2 Content szerkesztés (írás-intenzív, ~15% a forgalomból) + +| Metrika | MSSQL (baseline) | PostgreSQL (becslés) | +|---------|:-----------------:|:--------------------:| +| Node INSERT | 100% | ~95-100% | +| Node UPDATE (ROWVERSION trigger) | 100% | ~85-95% ⚠️ | +| Verzió létrehozás | 100% | ~90-100% | +| Bulk import | 100% | ~95-105% | +| **Átlagos írási teljesítmény** | **100%** | **~90-100%** ⚠️ | + +### 5.3 Blob műveletek (~5% a forgalomból) + +| Metrika | MSSQL (baseline) | PostgreSQL (becslés) | +|---------|:-----------------:|:--------------------:| +| Kis blob (<1MB) | 100% | ~95-100% | +| Közepes blob (1-10MB) | 100% | ~80-90% ⚠️ | +| Nagy blob (>10MB) | 100% | ~50-80% 🔴 | +| Külső blob storage-zal | 100% | ~100% ✅ | + +### 5.4 Összesített becslés (tipikus sensenet workload) + +``` +Összesített = (Olvasás × 0.80) + (Írás × 0.15) + (Blob × 0.05) + +MSSQL: (100% × 0.80) + (100% × 0.15) + (100% × 0.05) = 100% +PostgreSQL: (112% × 0.80) + (95% × 0.15) + (90% × 0.05) = ~108% +PostgreSQL + PgBouncer + ltree: = ~115% +PostgreSQL + külső blob + PgBouncer + ltree: = ~118% +``` + +**Összesítve**: A PostgreSQL **~5-18% javulást** hozhat a tipikus sensenet workload-nál, de ez erősen függ: +- Használnak-e külső blob storage-ot (ha igen → nagyobb előny) +- Mennyire írás-intenzív a workload (ha nagyon → kisebb előny) +- Használnak-e PgBouncer-t (ha igen → connection pool probléma megoldva) +- Kihasználják-e a PostgreSQL-specifikus funkciókat (ltree, jsonb) + +--- + 
+## 6. A connection pool probléma szempontjából + +A [connection pool kimerülés elemzésben](connection-pool-kimeriules-elemzes.md) leírt probléma szempontjából a PostgreSQL **közvetett javulást** hozhat: + +| Szempont | MSSQL | PostgreSQL | +|----------|-------|------------| +| Lock contention az olvasók között | ⚠️ Lehet | ✅ MVCC → nincs | +| Connection idő olvasásnál | Hosszabb (lock wait) | Rövidebb (nincs lock wait) | +| Külső connection pooler | ❌ Nincs érett megoldás | ✅ PgBouncer | +| Connection per command lehetőség | Lehetséges de nem szokás | PgBouncer transaction mode-dal natív | + +**De**: A connection pool probléma **gyökéroka az alkalmazás rétegben van** (az `SnDataContext` tartja a connection-t a callback futása alatt), ami **mindkét adatbázisnál fennáll**. A PostgreSQL a tüneteket enyhíti (kevesebb lock → rövidebb connection idő), de a gyökérokot nem oldja meg. + +--- + +## 7. Nem-teljesítmény előnyök + +A PostgreSQL-re váltásnak vannak **nem-teljesítmény jellegű előnyei** is, amelyek hosszú távon fontosabbak lehetnek: + +| Előny | Részletek | +|-------|-----------| +| **💰 Licencköltség** | PostgreSQL ingyenes. MSSQL Enterprise: ~$15K/core/év. Egy 8-core szerveren ez ~$120K/év megtakarítás. | +| **🐳 Konténerizálhatóság** | PostgreSQL natívan fut Linuxon, kis image méret (~150MB vs MSSQL ~1.5GB) | +| **☁️ Cloud portabilitás** | Minden felhőben elérhető: AWS RDS/Aurora, Azure Database for PostgreSQL, GCP Cloud SQL | +| **🔓 Vendor lock-in csökkentése** | Nem függ a Microsoft licenc politikájától | +| **🧩 Extension ökoszisztéma** | `ltree`, `pg_trgm`, `PostGIS`, `TimescaleDB`, stb. | +| **👥 Közösség** | Gyorsabban növekvő közösség, több contributor | + +--- + +## 8. Javaslat + +### Ha a fő motiváció a teljesítmény javítása: +❌ **Nem érdemes** — a PostgreSQL provider fejlesztése ~12-17 hét, ami ~5-18% javulást hoz. 
Ugyanezt az időt fordítva az MSSQL provider és az `SnDataContext` optimalizálására (connection kezelés, query tuning) **nagyobb és gyorsabb eredményt** lehet elérni. + +### Ha a fő motiváció a connection pool probléma megoldása: +⚠️ **Részben segít** — a PgBouncer + MVCC enyhíti a tüneteket, de a gyökérok (application-level connection kezelés) mindkét adatbázisnál javítandó. + +### Ha a fő motiváció a költségcsökkentés és portabilitás: +✅ **Érdemes** — az MSSQL licencköltség megtakarítása 1-2 év alatt megtérülhet, és a konténerizálhatóság / cloud portabilitás stratégiai előny. + +### Ha a fő motiváció az összes fenti: +✅ **Érdemes, de fázisoltan** — először javítani az absztrakt réteget és a connection kezelést (ami mindkét provider-nek jó), aztán a PostgreSQL providert ráépíteni. diff --git a/src/Common/Storage/Data/DataReaderExtension.cs b/src/Common/Storage/Data/DataReaderExtension.cs index 9ea9bb842..ec5c0f163 100644 --- a/src/Common/Storage/Data/DataReaderExtension.cs +++ b/src/Common/Storage/Data/DataReaderExtension.cs @@ -52,7 +52,18 @@ public static short GetSafeInt16(this IDataReader reader, int index) /// The index of the column to find. public static bool GetSafeBooleanFromByte(this IDataReader reader, int index) { - return !reader.IsDBNull(index) && Convert.ToBoolean(reader.GetByte(index)); + if (reader.IsDBNull(index)) + return false; + var value = reader.GetValue(index); + return value switch + { + byte b => b != 0, + short s => s != 0, + int i => i != 0, + long l => l != 0, + bool b => b, + _ => Convert.ToBoolean(value) + }; } /// /// Converts a Boolean DB column value to a .NET bool value safely. 
@@ -91,7 +102,12 @@ public static long GetSafeLongFromBytes(this IDataReader reader, int index) if (reader.IsDBNull(index)) return 0L; - return Tools.Utility.Convert.BytesToLong((byte[]) reader[index]); + var value = reader[index]; + if (value is long l) + return l; + if (value is int i) + return i; + return Tools.Utility.Convert.BytesToLong((byte[]) value); } public static DateTime GetDateTimeUtc(this IDataReader reader, int ordinal) @@ -102,7 +118,16 @@ public static DateTime GetDateTimeUtc(this IDataReader reader, int ordinal) public static byte[] GetSafeByteArray(this IDataReader reader, int index) { - return reader.IsDBNull(index) ? null : (byte[])reader.GetValue(index); + if (reader.IsDBNull(index)) return null; + var value = reader.GetValue(index); + if (value is byte[] bytes) + return bytes; + // PostgreSQL returns BIGINT timestamps as long instead of byte[] + if (value is long l) + return BitConverter.GetBytes(System.Net.IPAddress.HostToNetworkOrder(l)); + if (value is int i) + return BitConverter.GetBytes(System.Net.IPAddress.HostToNetworkOrder((long)i)); + return (byte[])value; } /* ============================================================================= */ diff --git a/src/Common/Storage/Data/SnDataContext.cs b/src/Common/Storage/Data/SnDataContext.cs index 0bc558267..4f5e3553d 100644 --- a/src/Common/Storage/Data/SnDataContext.cs +++ b/src/Common/Storage/Data/SnDataContext.cs @@ -1,7 +1,6 @@ using System; using System.Data; using System.Data.Common; -using Microsoft.Data.SqlClient; using System.Threading; using System.Threading.Tasks; using System.Transactions; @@ -53,7 +52,7 @@ public virtual void Dispose() public abstract DbCommand CreateCommand(); public abstract DbParameter CreateParameter(); - public DbParameter CreateParameter(string name, DbType dbType, object value) + public virtual DbParameter CreateParameter(string name, DbType dbType, object value) { var prm = CreateParameter(); prm.ParameterName = name; @@ -61,7 +60,7 @@ public 
DbParameter CreateParameter(string name, DbType dbType, object value) prm.Value = value; return prm; } - public DbParameter CreateParameter(string name, DbType dbType, int size, object value) + public virtual DbParameter CreateParameter(string name, DbType dbType, int size, object value) { var prm = CreateParameter(); prm.ParameterName = name; @@ -191,8 +190,22 @@ public async Task ExecuteReaderAsync(string script, Action setP internal static bool ShouldRetryOnError(Exception ex) { //TODO: generalize the expression by relying on error codes instead of hardcoded message texts - return (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) || - (ex is SqlException && ex.Message.Contains("A network-related or instance-specific error occurred")); + if (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) + return true; + + if (ex is DbException dbEx) + { + // Check for the IsTransient property (available on NpgsqlException and newer SqlException) + var isTransientProp = dbEx.GetType().GetProperty("IsTransient"); + if (isTransientProp?.GetValue(dbEx) is bool isTransient && isTransient) + return true; + + // Fallback for SQL Server specific network error message + if (dbEx.Message.Contains("A network-related or instance-specific error occurred")) + return true; + } + + return false; } protected string GetOperationMessage(string name, string script) diff --git a/src/Common/Storage/Data/TransactionWrapper.cs b/src/Common/Storage/Data/TransactionWrapper.cs index 3a97f8302..19cdd9406 100644 --- a/src/Common/Storage/Data/TransactionWrapper.cs +++ b/src/Common/Storage/Data/TransactionWrapper.cs @@ -63,7 +63,20 @@ public virtual void Rollback() { using (var op = SnTrace.Database.StartOperation("Transaction.Rollback " + Status)) { - Transaction.Rollback(); + try + { + Transaction.Rollback(); + } + catch (ObjectDisposedException) + { + // Transaction or connection already disposed — nothing to roll back. 
+ SnTrace.Database.Write("Transaction.Rollback skipped: already disposed."); + } + catch (InvalidOperationException) + { + // Transaction already completed or connection broken. + SnTrace.Database.Write("Transaction.Rollback skipped: invalid operation."); + } Status = TransactionStatus.Aborted; op.Successful = true; } diff --git a/src/ContentRepository.MsSql/MsSqlDataProvider.cs b/src/ContentRepository.MsSql/MsSqlDataProvider.cs index cf2a852d5..c59254b13 100644 --- a/src/ContentRepository.MsSql/MsSqlDataProvider.cs +++ b/src/ContentRepository.MsSql/MsSqlDataProvider.cs @@ -331,6 +331,20 @@ public override async STT.Task InstallDatabaseAsync(CancellationToken cancellati { using var op = SnTrace.Database.StartOperation("MsSqlDataProvider: InstallDatabaseAsync()."); + if (!_dbInstallerOptions.EnableFirstInstallDB) + { + _logger.LogTrace("EnableFirstInstallDB is disabled. Skipping database installation."); + op.Successful = true; + return; + } + + if (await IsDatabaseAlreadyInstalledAsync(cancellationToken).ConfigureAwait(false)) + { + _logger.LogWarning("Database already contains data. Skipping installation to prevent data loss."); + op.Successful = true; + return; + } + if (!string.IsNullOrEmpty(_dbInstallerOptions.DatabaseName)) { _logger.LogTrace($"Executing installer for database {_dbInstallerOptions.DatabaseName}."); @@ -554,6 +568,35 @@ protected override object ConvertInt64ToTimestamp(long timestamp) return bytes; } + /// + /// Checks whether the database has already been installed by detecting the presence of + /// the Nodes table and at least one row. Used as a safety guard to prevent accidental + /// reinstallation and data loss. 
+ /// + private async STT.Task IsDatabaseAlreadyInstalledAsync(CancellationToken cancellationToken) + { + try + { + using var ctx = CreateDataContext(cancellationToken); + var result = await ctx.ExecuteScalarAsync(@" + IF EXISTS (SELECT 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'Nodes') + SELECT COUNT(1) FROM Nodes + ELSE + SELECT 0").ConfigureAwait(false); + + var count = Convert.ToInt32(result); + if (count > 0) + _logger.LogTrace($"IsDatabaseAlreadyInstalledAsync: Nodes table contains {count} rows, database is already installed."); + + return count > 0; + } + catch (Exception ex) + { + _logger.LogWarning(ex, "IsDatabaseAlreadyInstalledAsync: Could not determine database state. Assuming not installed."); + return false; + } + } + /// /// Loads the provided embedded SQL script from the current assembly and executes it /// on the configured database. diff --git a/src/ContentRepository.MsSql/MsSqlDatabaseInstaller.cs b/src/ContentRepository.MsSql/MsSqlDatabaseInstaller.cs index be9afbd1c..80419fcee 100644 --- a/src/ContentRepository.MsSql/MsSqlDatabaseInstaller.cs +++ b/src/ContentRepository.MsSql/MsSqlDatabaseInstaller.cs @@ -24,6 +24,14 @@ protected DbCreationException(SerializationInfo info, StreamingContext context) [OptionsClass(sectionName: "sensenet:install:mssql")] public class MsSqlDatabaseInstallationOptions { + /// + /// Allows the application to install the database schema and initial data on first run. + /// Default is false to prevent accidental data loss in production environments. + /// Set to true only when deploying a brand new instance. + /// In appsettings.json: sensenet:install:mssql:EnableFirstInstallDB + /// As environment variable: sensenet__install__mssql__EnableFirstInstallDB=true + /// + public bool EnableFirstInstallDB { get; set; } = false; /// /// Database server name. 
/// diff --git a/src/ContentRepository.PostgreSql/Components/PgSqlClientStoreComponent.cs b/src/ContentRepository.PostgreSql/Components/PgSqlClientStoreComponent.cs new file mode 100644 index 000000000..0fd44591e --- /dev/null +++ b/src/ContentRepository.PostgreSql/Components/PgSqlClientStoreComponent.cs @@ -0,0 +1,52 @@ +using System; +using System.Threading; +using Microsoft.Extensions.Options; +using SenseNet.Configuration; +using SenseNet.ContentRepository.Security.Clients; +using SenseNet.ContentRepository.Storage.Data; +using SenseNet.ContentRepository.Storage.Data.PgSqlClient; +using SenseNet.Diagnostics; +using SenseNet.Packaging; + +namespace SenseNet.ContentRepository.Components +{ + public class PgSqlClientStoreComponent : SnComponent + { + public override string ComponentId { get; } = "SenseNet.ClientStore.PgSql"; + + public override void AddPatches(PatchBuilder builder) + { + builder.Install("1.0.0", "2024-01-01", "PostgreSQL implementation of Client store.") + .DependsOn("SenseNet.Services", "7.7.23") + .ActionOnBefore(context => + { + if (!(Providers.Instance.DataProvider is RelationalDataProviderBase dataProvider)) + throw new InvalidOperationException("Cannot install PgSqlClientStoreComponent because it is " + + $"incompatible with Data provider {Providers.Instance.DataProvider.GetType().FullName}."); + + try + { + using var op = SnTrace.Database.StartOperation("PgSqlClientStoreComponent: " + + "Install PostgreSQL implementation of Client store (v1.0.0). 
" + + "Script name: PgSqlClientStoreDataProvider.DropAndCreateTablesSql."); + using var ctx = dataProvider.CreateDataContext(CancellationToken.None); + ctx.ExecuteNonQueryAsync(PgSqlClientStoreDataProvider.DropAndCreateTablesSql) + .GetAwaiter().GetResult(); + op.Successful = true; + } + catch (Exception ex) + { + context.Log($"Error during installation of PgSqlClientStore: {ex.Message}"); + throw; + } + }) + .Action(context => + { + var clientStore = context.GetService(); + var clientOptions = context.GetService>().Value; + + clientStore.EnsureClientsAsync(clientOptions.Authority, clientOptions.RepositoryUrl.RemoveUrlSchema()).GetAwaiter().GetResult(); + }); + } + } +} diff --git a/src/ContentRepository.PostgreSql/Components/PgSqlExclusiveLockComponent.cs b/src/ContentRepository.PostgreSql/Components/PgSqlExclusiveLockComponent.cs new file mode 100644 index 000000000..df1187d60 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Components/PgSqlExclusiveLockComponent.cs @@ -0,0 +1,43 @@ +using System; +using System.Threading; +using SenseNet.Configuration; +using SenseNet.ContentRepository.Storage.Data; +using SenseNet.ContentRepository.Storage.Data.PgSqlClient; +using SenseNet.Diagnostics; +using SenseNet.Packaging; + +namespace SenseNet.ContentRepository.Components +{ + public class PgSqlExclusiveLockComponent : SnComponent + { + public override string ComponentId { get; } = "SenseNet.ExclusiveLock.PgSql"; + + public override void AddPatches(PatchBuilder builder) + { + builder.Install("1.0.0", "2024-01-01", "PostgreSQL data provider extension for the Exclusive lock feature.") + .DependsOn("SenseNet.Services", "7.7.22") + .ActionOnBefore(context => + { + var dataStore = Providers.Instance.DataStore; + if (!(dataStore.DataProvider is RelationalDataProviderBase dataProvider)) + throw new InvalidOperationException("Cannot install PgSqlExclusiveLockComponent because it is " + + $"incompatible with Data provider {dataStore.DataProvider.GetType().FullName}."); + + try 
+ { + using var op = SnTrace.Database.StartOperation("PgSqlExclusiveLockComponent: " + + "Install PostgreSQL data provider extension for the Exclusive lock feature (v1.0.0). " + + "Script name: PgSqlExclusiveLockDataProvider.CreationScript."); + using var ctx = dataProvider.CreateDataContext(CancellationToken.None); + ctx.ExecuteNonQueryAsync(PgSqlExclusiveLockDataProvider.CreationScript).GetAwaiter().GetResult(); + op.Successful = true; + } + catch (Exception ex) + { + context.Log($"Error during installation of PgSqlExclusiveLockComponent: {ex.Message}"); + throw; + } + }); + } + } +} diff --git a/src/ContentRepository.PostgreSql/Components/PgSqlStatisticsComponent.cs b/src/ContentRepository.PostgreSql/Components/PgSqlStatisticsComponent.cs new file mode 100644 index 000000000..23711ec8b --- /dev/null +++ b/src/ContentRepository.PostgreSql/Components/PgSqlStatisticsComponent.cs @@ -0,0 +1,43 @@ +using System; +using System.Threading; +using SenseNet.Configuration; +using SenseNet.ContentRepository.Storage.Data; +using SenseNet.ContentRepository.Storage.Data.PgSqlClient; +using SenseNet.Diagnostics; +using SenseNet.Packaging; + +namespace SenseNet.ContentRepository.Components +{ + public class PgSqlStatisticsComponent : SnComponent + { + public override string ComponentId { get; } = "SenseNet.Statistics.PgSql"; + + public override void AddPatches(PatchBuilder builder) + { + builder.Install("1.0.0", "2024-01-01", "PostgreSQL data provider extension for the statistical data handling feature.") + .DependsOn("SenseNet.Services", "7.7.22") + .ActionOnBefore(context => + { + var dataStore = Providers.Instance.DataStore; + if (!(dataStore.DataProvider is RelationalDataProviderBase dataProvider)) + throw new InvalidOperationException("Cannot install PgSqlStatisticsComponent because it is " + + $"incompatible with Data provider {dataStore.DataProvider.GetType().FullName}."); + + try + { + using var op = SnTrace.Database.StartOperation("PgSqlStatisticsComponent: " + + 
"Install PostgreSQL data provider extension for the statistical data handling feature (v1.0.0). " + + "Script name: PgSqlStatisticalDataProvider.CreationScript"); + using var ctx = dataProvider.CreateDataContext(CancellationToken.None); + ctx.ExecuteNonQueryAsync(PgSqlStatisticalDataProvider.CreationScript).GetAwaiter().GetResult(); + op.Successful = true; + } + catch (Exception ex) + { + context.Log($"Error during installation of PgSqlStatisticsComponent: {ex.Message}"); + throw; + } + }); + } + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlAccessTokenDataProvider.cs b/src/ContentRepository.PostgreSql/Data/PgSqlAccessTokenDataProvider.cs new file mode 100644 index 000000000..534f80567 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlAccessTokenDataProvider.cs @@ -0,0 +1,249 @@ +using System; +using System.Collections.Generic; +using System.Data; +using System.Data.Common; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +using Microsoft.Extensions.Options; +using SenseNet.Configuration; +using SenseNet.ContentRepository.Storage.Security; +using SenseNet.Diagnostics; +using SenseNet.Tools; + +// ReSharper disable once CheckNamespace +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient +{ + public class PgSqlAccessTokenDataProvider : IAccessTokenDataProvider + { + private readonly IRetrier _retrier; + private DataOptions DataOptions { get; } + private ConnectionStringOptions ConnectionStrings { get; } + + public PgSqlAccessTokenDataProvider(IOptions dataOptions, + IOptions connectionOptions, IRetrier retrier) + { + _retrier = retrier; + DataOptions = dataOptions?.Value ?? new DataOptions(); + ConnectionStrings = connectionOptions?.Value ?? 
new ConnectionStringOptions(); + } + + public async System.Threading.Tasks.Task DeleteAllAccessTokensAsync(CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + await ctx.ExecuteNonQueryAsync( + @"TRUNCATE TABLE ""AccessTokens""").ConfigureAwait(false); + } + + public async System.Threading.Tasks.Task SaveAccessTokenAsync(AccessToken token, CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlAccessTokenDataProvider: " + + "SaveAccessToken: UserId: {0}, ContentId: {1}", token.UserId, token.ContentId); + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var result = await ctx.ExecuteReaderAsync( + @"INSERT INTO ""AccessTokens"" (""Value"", ""UserId"", ""ContentId"", ""Feature"", ""CreationDate"", ""ExpirationDate"") +VALUES (@Value, @UserId, @ContentId, @Feature, @CreationDate, @ExpirationDate) +RETURNING ""AccessTokenId""", cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@Value", DbType.String, 1000, token.Value), + ctx.CreateParameter("@UserId", DbType.Int32, token.UserId), + ctx.CreateParameter("@ContentId", DbType.Int32, token.ContentId != 0 ? (object)token.ContentId : DBNull.Value), + ctx.CreateParameter("@Feature", DbType.String, 1000, token.Feature != null ? 
(object)token.Feature : DBNull.Value), + ctx.CreateParameter("@CreationDate", DbType.DateTime2, token.CreationDate), + ctx.CreateParameter("@ExpirationDate", DbType.DateTime2, token.ExpirationDate), + }); + }, async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + if (await reader.ReadAsync(cancel).ConfigureAwait(false)) + token.Id = reader.GetInt32(0); + return true; + }).ConfigureAwait(false); + op.Successful = true; + } + + public async Task LoadAccessTokenByIdAsync(int accessTokenId, CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var result = await ctx.ExecuteReaderAsync( + @"SELECT ""AccessTokenId"", ""Value"", ""UserId"", ""ContentId"", ""Feature"", + ""CreationDate"", ""ExpirationDate"" +FROM ""AccessTokens"" WHERE ""AccessTokenId"" = @Id AND ""ExpirationDate"" > @Now", cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@Id", DbType.Int32, accessTokenId), + ctx.CreateParameter("@Now", DbType.DateTime2, DateTime.UtcNow), + }); + }, async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + return await reader.ReadAsync(cancel).ConfigureAwait(false) + ? 
GetAccessTokenFromReader(reader) : null; + }).ConfigureAwait(false); + return result; + } + + public async Task LoadAccessTokenAsync(string tokenValue, int contentId, string feature, + CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var sql = @"SELECT ""AccessTokenId"", ""Value"", ""UserId"", ""ContentId"", ""Feature"", + ""CreationDate"", ""ExpirationDate"" +FROM ""AccessTokens"" +WHERE ""Value"" = @Value AND ""ExpirationDate"" > @Now"; + + if (contentId > 0) + sql += @" AND ""ContentId"" = @ContentId"; + else + sql += @" AND ""ContentId"" IS NULL"; + + if (feature != null) + sql += @" AND ""Feature"" = @Feature"; + else + sql += @" AND ""Feature"" IS NULL"; + + var result = await ctx.ExecuteReaderAsync(sql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@Value", DbType.String, 1000, tokenValue)); + cmd.Parameters.Add(ctx.CreateParameter("@Now", DbType.DateTime2, DateTime.UtcNow)); + if (contentId > 0) + cmd.Parameters.Add(ctx.CreateParameter("@ContentId", DbType.Int32, contentId)); + if (feature != null) + cmd.Parameters.Add(ctx.CreateParameter("@Feature", DbType.String, 1000, feature)); + }, async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + return await reader.ReadAsync(cancel).ConfigureAwait(false) + ? 
GetAccessTokenFromReader(reader) : null; + }).ConfigureAwait(false); + return result; + } + + public async Task LoadAccessTokensAsync(int userId, CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var result = await ctx.ExecuteReaderAsync( + @"SELECT ""AccessTokenId"", ""Value"", ""UserId"", ""ContentId"", ""Feature"", + ""CreationDate"", ""ExpirationDate"" +FROM ""AccessTokens"" WHERE ""UserId"" = @UserId AND ""ExpirationDate"" > @Now", cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@UserId", DbType.Int32, userId), + ctx.CreateParameter("@Now", DbType.DateTime2, DateTime.UtcNow), + }); + }, async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + var tokens = new List(); + while (await reader.ReadAsync(cancel).ConfigureAwait(false)) + tokens.Add(GetAccessTokenFromReader(reader)); + return tokens.ToArray(); + }).ConfigureAwait(false); + return result; + } + + public async System.Threading.Tasks.Task UpdateAccessTokenAsync(string tokenValue, DateTime newExpirationDate, + CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var count = await ctx.ExecuteNonQueryAsync( + @"UPDATE ""AccessTokens"" SET ""ExpirationDate"" = @NewExpiration +WHERE ""Value"" = @Value AND ""ExpirationDate"" > @Now", cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@Value", DbType.String, 1000, tokenValue), + ctx.CreateParameter("@NewExpiration", DbType.DateTime2, newExpirationDate), + ctx.CreateParameter("@Now", DbType.DateTime2, DateTime.UtcNow), + }); + }).ConfigureAwait(false); + if (count == 0) + throw new InvalidAccessTokenException("Token not found or it is expired."); + } + + public async System.Threading.Tasks.Task DeleteAccessTokenAsync(string tokenValue, CancellationToken cancellationToken) + { + using var ctx = new 
PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + await ctx.ExecuteNonQueryAsync( + @"DELETE FROM ""AccessTokens"" WHERE ""Value"" = @Value", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@Value", DbType.String, 1000, tokenValue)); + }).ConfigureAwait(false); + } + + public async System.Threading.Tasks.Task DeleteAccessTokensByUserAsync(int userId, CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + await ctx.ExecuteNonQueryAsync( + @"DELETE FROM ""AccessTokens"" WHERE ""UserId"" = @UserId", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@UserId", DbType.Int32, userId)); + }).ConfigureAwait(false); + } + + public async System.Threading.Tasks.Task DeleteAccessTokensByContentAsync(int contentId, CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + await ctx.ExecuteNonQueryAsync( + @"DELETE FROM ""AccessTokens"" WHERE ""ContentId"" = @ContentId", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@ContentId", DbType.Int32, contentId)); + }).ConfigureAwait(false); + } + + public async System.Threading.Tasks.Task CleanupAccessTokensAsync(CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + await ctx.ExecuteNonQueryAsync( + @"DELETE FROM ""AccessTokens"" WHERE ""ExpirationDate"" < @Now", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@Now", DbType.DateTime2, DateTime.UtcNow)); + }).ConfigureAwait(false); + } + + public async System.Threading.Tasks.Task DeleteAccessTokensAsync(int userId, int contentId, string feature, + CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + + var sql = @"DELETE FROM 
""AccessTokens"" WHERE 1=1"; + if (userId > 0) + sql += @" AND ""UserId"" = @UserId"; + if (contentId > 0) + sql += @" AND ""ContentId"" = @ContentId"; + if (feature != null) + sql += @" AND ""Feature"" = @Feature"; + + await ctx.ExecuteNonQueryAsync(sql, cmd => + { + if (userId > 0) + cmd.Parameters.Add(ctx.CreateParameter("@UserId", DbType.Int32, userId)); + if (contentId > 0) + cmd.Parameters.Add(ctx.CreateParameter("@ContentId", DbType.Int32, contentId)); + if (feature != null) + cmd.Parameters.Add(ctx.CreateParameter("@Feature", DbType.String, 1000, feature)); + }).ConfigureAwait(false); + } + + private static AccessToken GetAccessTokenFromReader(DbDataReader reader) + { + return new AccessToken + { + Id = reader.GetInt32(reader.GetOrdinal("AccessTokenId")), + Value = reader.GetString(reader.GetOrdinal("Value")), + UserId = reader.GetInt32(reader.GetOrdinal("UserId")), + ContentId = reader.IsDBNull(reader.GetOrdinal("ContentId")) + ? 0 : reader.GetInt32(reader.GetOrdinal("ContentId")), + Feature = reader.IsDBNull(reader.GetOrdinal("Feature")) + ? 
null : reader.GetString(reader.GetOrdinal("Feature")), + CreationDate = reader.GetDateTime(reader.GetOrdinal("CreationDate")), + ExpirationDate = reader.GetDateTime(reader.GetOrdinal("ExpirationDate")), + }; + } + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlBlobMetaDataProvider.cs b/src/ContentRepository.PostgreSql/Data/PgSqlBlobMetaDataProvider.cs new file mode 100644 index 000000000..e626412e4 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlBlobMetaDataProvider.cs @@ -0,0 +1,698 @@ +using System; +using System.Collections.Generic; +using System.Data; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +using Microsoft.Extensions.Options; +using SenseNet.Configuration; +using SenseNet.ContentRepository.Storage.Data.MsSqlClient; +using SenseNet.Diagnostics; +using SenseNet.Tools; +// ReSharper disable AccessToDisposedClosure + +// ReSharper disable once CheckNamespace +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient +{ + /// + /// Contains the PostgreSQL-specific implementation of the IBlobStorageMetaDataProvider interface that + /// is responsible for binary-related operations in the main metadata database. + /// + public partial class PgSqlBlobMetaDataProvider : IBlobStorageMetaDataProvider + { + private readonly IRetrier _retrier; + private DataOptions DataOptions { get; } + private BlobStorageOptions BlobStorageOptions { get; } + private IBlobProviderStore Providers { get; } + private ConnectionStringOptions ConnectionStrings { get; } + + public PgSqlBlobMetaDataProvider(IBlobProviderStore providers, IOptions dataOptions, + IOptions blobStorageOptions, IOptions connectionOptions, IRetrier retrier) + { + _retrier = retrier; + Providers = providers; + DataOptions = dataOptions?.Value ?? new DataOptions(); + BlobStorageOptions = blobStorageOptions?.Value ?? new BlobStorageOptions(); + ConnectionStrings = connectionOptions?.Value ?? 
new ConnectionStringOptions(); + } + + /* ======================================================================================= IBlobStorageMetaDataProvider */ + + private static string ValidateExtension(string originalExtension) + { + return originalExtension.Length == 0 + ? string.Empty + : originalExtension; + } + + public async Task GetBlobStorageContextAsync(int fileId, bool clearStream, int versionId, int propertyTypeId, + CancellationToken cancellationToken) + { + var sql = GetBlobStorageContextScript; + if (clearStream) + sql = ClearStreamByFileIdScript + sql; + + cancellationToken.ThrowIfCancellationRequested(); + + using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: " + + "GetBlobStorageContext(fileId: {0}, clearStream: {1}, versionId: {2}, propertyTypeId: {3})", + fileId, clearStream, versionId, propertyTypeId); + + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var result = await ctx.ExecuteReaderAsync(sql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@FileId", DbType.Int32, fileId)); + if (clearStream) + { + cmd.Parameters.Add(ctx.CreateParameter("@VersionId", DbType.Int32, versionId)); + cmd.Parameters.Add(ctx.CreateParameter("@PropertyTypeId", DbType.Int32, propertyTypeId)); + } + }, async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + if (!await reader.ReadAsync(cancel).ConfigureAwait(false)) + return null; + + var length = reader.GetSafeInt64(0); + var providerName = reader.GetSafeString(1); + var providerData = reader.GetSafeString(2); + var provider = Providers.GetProvider(providerName); + + return new BlobStorageContext(provider, providerData) + { + VersionId = versionId, + PropertyTypeId = propertyTypeId, + FileId = fileId, + Length = length, + BlobProviderData = provider is IBuiltInBlobProvider + ? 
new BuiltinBlobProviderData() + : provider.ParseData(providerData) + }; + }).ConfigureAwait(false); + op.Successful = true; + + return result; + } + + public async System.Threading.Tasks.Task InsertBinaryPropertyAsync(IBlobProvider blobProvider, BinaryDataValue value, int versionId, int propertyTypeId, + bool isNewNode, SnDataContext dataContext) + { + var streamLength = value.Stream?.Length ?? 0; + + using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: InsertBinaryPropertyAsync: " + + "BlobProvider: {0}, ContentType: {1}, FileName: {2}, Size: {3}, VersionId: {4}, PropertyTypeId: {5}, IsNewNode: {6})", + blobProvider.GetType().Name, value.ContentType, value.FileName, streamLength, versionId, propertyTypeId, isNewNode); + + var ctx = new BlobStorageContext(blobProvider) { VersionId = versionId, PropertyTypeId = propertyTypeId, FileId = 0, Length = streamLength }; + + if (!(blobProvider is IBuiltInBlobProvider)) + { + await blobProvider.AllocateAsync(ctx, dataContext.CancellationToken).ConfigureAwait(false); + + using (var stream = blobProvider.GetStreamForWrite(ctx)) + value.Stream?.CopyTo(stream); + + value.BlobProviderName = ctx.Provider.GetType().FullName; + value.BlobProviderData = BlobStorageContext.SerializeBlobProviderData(ctx.BlobProviderData); + } + + if(!(dataContext is PgSqlDataContext sqlCtx)) + throw new PlatformNotSupportedException(); + + var sql = isNewNode ? InsertBinaryPropertyScript : DeleteAndInsertBinaryPropertyScript; + if (!isNewNode) + dataContext.NeedToCleanupFiles = true; + + await sqlCtx.ExecuteReaderAsync(sql, cmd => + { + cmd.Parameters.AddRange(new[] + { + sqlCtx.CreateParameter("@VersionId", DbType.Int32, versionId != 0 ? (object)versionId : DBNull.Value), + sqlCtx.CreateParameter("@PropertyTypeId", DbType.Int32, propertyTypeId != 0 ? 
(object)propertyTypeId : DBNull.Value), + sqlCtx.CreateParameter("@ContentType", DbType.String, 450, value.ContentType), + sqlCtx.CreateParameter("@FileNameWithoutExtension", DbType.String, 450, value.FileName.FileNameWithoutExtension == null ? DBNull.Value : (object)value.FileName.FileNameWithoutExtension), + sqlCtx.CreateParameter("@Extension", DbType.String, 50, ValidateExtension(value.FileName.Extension)), + sqlCtx.CreateParameter("@Size", DbType.Int64, Math.Max(0, value.Size)), + sqlCtx.CreateParameter("@BlobProvider", DbType.String, 450, value.BlobProviderName != null ? (object)value.BlobProviderName : DBNull.Value), + sqlCtx.CreateParameter("@BlobProviderData", DbType.String, int.MaxValue, value.BlobProviderData != null ? (object)value.BlobProviderData : DBNull.Value), + sqlCtx.CreateParameter("@Checksum", DbType.AnsiString, 200, value.Checksum != null ? (object)value.Checksum : DBNull.Value), + }); + }, async (reader, cancel) => + { + if (await reader.ReadAsync(cancel).ConfigureAwait(false)) + { + value.Id = Convert.ToInt32(reader[0]); + value.FileId = Convert.ToInt32(reader[1]); + value.Timestamp = Convert.ToInt64(reader.GetValue(2)); + } + return true; + }).ConfigureAwait(false); + + if (blobProvider is IBuiltInBlobProvider && value.Stream != null) + { + ctx.FileId = value.FileId; + ctx.BlobProviderData = new BuiltinBlobProviderData(); + + await PgSqlBuiltInBlobProvider.AddStreamAsync(ctx, value.Stream, sqlCtx).ConfigureAwait(false); + } + + op.Successful = true; + } + + public async System.Threading.Tasks.Task InsertBinaryPropertyWithFileIdAsync(BinaryDataValue value, int versionId, int propertyTypeId, bool isNewNode, + SnDataContext dataContext) + { + using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: InsertBinaryPropertyWithFileId: " + + "VersionId: {0}, PropertyTypeId: {1}, FileId: {2}, IsNewNode: {3}", + versionId, propertyTypeId, value.FileId, isNewNode); + + var sql = isNewNode ? 
InsertBinaryPropertyWithKnownFileIdScript : DeleteAndInsertBinaryPropertyWithKnownFileIdScript; + if (!isNewNode) + dataContext.NeedToCleanupFiles = true; + + if (!(dataContext is PgSqlDataContext sqlCtx)) + throw new PlatformNotSupportedException(); + + value.Id = (int)await sqlCtx.ExecuteScalarAsync(sql, cmd => + { + cmd.Parameters.AddRange(new[] + { + sqlCtx.CreateParameter("@VersionId", DbType.Int32, versionId != 0 ? (object) versionId : DBNull.Value), + sqlCtx.CreateParameter("@PropertyTypeId", DbType.Int32, propertyTypeId != 0 ? (object) propertyTypeId : DBNull.Value), + sqlCtx.CreateParameter("@FileId", DbType.Int32, value.FileId), + }); + }).ConfigureAwait(false); + + op.Successful = true; + } + + public async System.Threading.Tasks.Task UpdateBinaryPropertyAsync(IBlobProvider blobProvider, BinaryDataValue value, SnDataContext dataContext) + { + var streamLength = value.Stream?.Length ?? 0; + using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: UpdateBinaryProperty: " + + "BlobProvider: {0}, BinaryPropertyId: {1}, FileId: {2}, ContentType: {3}, FileName: {4}, Size: {5}", + blobProvider.GetType().Name, value.Id, value.FileId, value.ContentType, value.FileName, streamLength); + + var isExternal = false; + if (!(blobProvider is IBuiltInBlobProvider)) + { + var ctx = new BlobStorageContext(blobProvider) + { + VersionId = 0, + PropertyTypeId = 0, + FileId = value.FileId, + Length = streamLength, + }; + + await blobProvider.AllocateAsync(ctx, dataContext.CancellationToken).ConfigureAwait(false); + isExternal = true; + + value.BlobProviderName = ctx.Provider.GetType().FullName; + value.BlobProviderData = BlobStorageContext.SerializeBlobProviderData(ctx.BlobProviderData); + } + else + { + value.BlobProviderName = null; + value.BlobProviderData = null; + } + + if (blobProvider is IBuiltInBlobProvider) + { + if (streamLength > int.MaxValue) + throw new NotSupportedException(); + } + + var isRepositoryStream = value.Stream is RepositoryStream; + 
var hasStream = isRepositoryStream || value.Stream is MemoryStream; + if (!isExternal && !hasStream) + { + SnTrace.Database.Write("Do not do any database operation because the stream is not modified."); + op.Successful = true; + return; + } + + if(!(dataContext is PgSqlDataContext sqlCtx)) + throw new PlatformNotSupportedException(); + + // For built-in blob providers we need conditional logic: + // check if a new Files row is needed, then either insert-new or update-in-place. + // For external blob providers we always insert a new Files row. + int fileId; + if (blobProvider is IBuiltInBlobProvider) + { + // Step 1: check if we need a new Files row + var needNewRow = (bool)await sqlCtx.ExecuteScalarAsync(UpdateBinaryPropertyNeedNewFileRowScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + sqlCtx.CreateParameter("@BinaryPropertyId", DbType.Int32, value.Id), + sqlCtx.CreateParameter("@BlobProvider", DbType.String, 450, value.BlobProviderName != null ? (object)value.BlobProviderName : DBNull.Value), + }); + }).ConfigureAwait(false); + + // Step 2: either insert a new file row or update in-place + var updateSql = needNewRow ? UpdateBinaryPropertyNewFilerowScript : UpdateBinaryPropertyInPlaceScript; + fileId = (int)await sqlCtx.ExecuteScalarAsync(updateSql, cmd => + { + cmd.Parameters.AddRange(new[] + { + sqlCtx.CreateParameter("@BinaryPropertyId", DbType.Int32, value.Id), + sqlCtx.CreateParameter("@ContentType", DbType.String, 450, value.ContentType), + sqlCtx.CreateParameter("@FileNameWithoutExtension", DbType.String, 450, value.FileName.FileNameWithoutExtension == null ? DBNull.Value : (object)value.FileName.FileNameWithoutExtension), + sqlCtx.CreateParameter("@Extension", DbType.String, 50, ValidateExtension(value.FileName.Extension)), + sqlCtx.CreateParameter("@Size", DbType.Int64, value.Size), + sqlCtx.CreateParameter("@Checksum", DbType.AnsiString, 200, value.Checksum != null ? 
(object)value.Checksum : DBNull.Value), + sqlCtx.CreateParameter("@BlobProvider", DbType.String, 450, value.BlobProviderName != null ? (object)value.BlobProviderName : DBNull.Value), + sqlCtx.CreateParameter("@BlobProviderData", DbType.String, int.MaxValue, value.BlobProviderData != null ? (object)value.BlobProviderData : DBNull.Value), + }); + }).ConfigureAwait(false); + } + else + { + fileId = (int)await sqlCtx.ExecuteScalarAsync(UpdateBinaryPropertyNewFilerowScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + sqlCtx.CreateParameter("@BinaryPropertyId", DbType.Int32, value.Id), + sqlCtx.CreateParameter("@ContentType", DbType.String, 450, value.ContentType), + sqlCtx.CreateParameter("@FileNameWithoutExtension", DbType.String, 450, value.FileName.FileNameWithoutExtension == null ? DBNull.Value : (object)value.FileName.FileNameWithoutExtension), + sqlCtx.CreateParameter("@Extension", DbType.String, 50, ValidateExtension(value.FileName.Extension)), + sqlCtx.CreateParameter("@Size", DbType.Int64, value.Size), + sqlCtx.CreateParameter("@Checksum", DbType.AnsiString, 200, value.Checksum != null ? (object)value.Checksum : DBNull.Value), + sqlCtx.CreateParameter("@BlobProvider", DbType.String, 450, value.BlobProviderName != null ? (object)value.BlobProviderName : DBNull.Value), + sqlCtx.CreateParameter("@BlobProviderData", DbType.String, int.MaxValue, value.BlobProviderData != null ? 
(object)value.BlobProviderData : DBNull.Value), + }); + }).ConfigureAwait(false); + } + + if (fileId > 0 && fileId != value.FileId) + value.FileId = fileId; + + if (blobProvider is IBuiltInBlobProvider) + { + var ctx = new BlobStorageContext(blobProvider, value.BlobProviderData) + { + VersionId = 0, + PropertyTypeId = 0, + FileId = value.FileId, + Length = streamLength, + BlobProviderData = new BuiltinBlobProviderData() + }; + + await PgSqlBuiltInBlobProvider.UpdateStreamAsync(ctx, value.Stream, sqlCtx).ConfigureAwait(false); + } + else + { + var ctx = new BlobStorageContext(blobProvider, value.BlobProviderData) + { + VersionId = 0, + PropertyTypeId = 0, + FileId = value.FileId, + Length = streamLength, + }; + if (streamLength == 0) + { + await blobProvider.ClearAsync(ctx, dataContext.CancellationToken).ConfigureAwait(false); + } + else + { + using (var stream = blobProvider.GetStreamForWrite(ctx)) + value.Stream?.CopyTo(stream); + } + } + op.Successful = true; + } + + public async System.Threading.Tasks.Task DeleteBinaryPropertyAsync(int versionId, int propertyTypeId, SnDataContext dataContext) + { + if (!(dataContext is PgSqlDataContext sqlCtx)) + throw new PlatformNotSupportedException(); + + using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: " + + "DeleteBinaryProperty(versionId: {0}, propertyTypeId: {1})", versionId, propertyTypeId); + await sqlCtx.ExecuteNonQueryAsync(DeleteBinaryPropertyScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + sqlCtx.CreateParameter("@VersionId", DbType.Int32, versionId), + sqlCtx.CreateParameter("@PropertyTypeId", DbType.Int32, propertyTypeId), + }); + }).ConfigureAwait(false); + op.Successful = true; + } + + public async System.Threading.Tasks.Task DeleteBinaryPropertiesAsync(IEnumerable versionIds, SnDataContext dataContext) + { + if (!(dataContext is PgSqlDataContext sqlCtx)) + throw new PlatformNotSupportedException(); + + var idsParam = string.Join(",", versionIds.Select(x => x.ToString())); + 
using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: " + + "DeleteBinaryProperties(versionIds: [{0}])", idsParam); + await sqlCtx.ExecuteNonQueryAsync(DeleteBinaryPropertiesScript, cmd => + { + cmd.Parameters.Add(sqlCtx.CreateParameter("@VersionIds", DbType.String, idsParam.Length, idsParam)); + }).ConfigureAwait(false); + op.Successful = true; + } + + public async Task LoadBinaryPropertyAsync(int versionId, int propertyTypeId, SnDataContext dataContext) + { + if (!(dataContext is PgSqlDataContext sqlCtx)) + throw new PlatformNotSupportedException(); + + using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: " + + "LoadBinaryProperty(versionId: {0}, propertyTypeId: {1})", versionId, propertyTypeId); + var result = await sqlCtx.ExecuteReaderAsync(LoadBinaryPropertyScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + sqlCtx.CreateParameter("@VersionId", DbType.Int32, versionId), + sqlCtx.CreateParameter("@PropertyTypeId", DbType.Int32, propertyTypeId), + }); + }, async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + if (!await reader.ReadAsync(cancel).ConfigureAwait(false)) + return null; + + var size = reader.GetInt64("Size"); + var binaryPropertyId = reader.GetInt32("BinaryPropertyId"); + var fileId = reader.GetInt32("FileId"); + var providerName = reader.GetSafeString("BlobProvider"); + var providerTextData = reader.GetSafeString("BlobProviderData"); + var provider = Providers.GetProvider(providerName); + var context = new BlobStorageContext(provider, providerTextData) + { + VersionId = versionId, + PropertyTypeId = propertyTypeId, + FileId = fileId, + Length = size + }; + Stream stream = null; + if (provider is IBuiltInBlobProvider) + { + context.BlobProviderData = new BuiltinBlobProviderData(); + var streamIndex = reader.GetOrdinal("Stream"); + if (!reader.IsDBNull(streamIndex)) + { + var rawData = (byte[]) reader.GetValue(streamIndex); + stream = new MemoryStream(rawData); + } + } + + return new 
BinaryDataValue + { + Id = binaryPropertyId, + FileId = fileId, + ContentType = reader.GetSafeString("ContentType"), + FileName = new BinaryFileName( + reader.GetSafeString("FileNameWithoutExtension") ?? "", + reader.GetSafeString("Extension") ?? ""), + Size = size, + Checksum = reader.GetSafeString("Checksum"), + BlobProviderName = providerName, + BlobProviderData = providerTextData, + Timestamp = reader.GetSafeLongFromBytes("Timestamp"), + Stream = stream + }; + }).ConfigureAwait(false); + op.Successful = true; + + return result; + } + + public async Task LoadBinaryCacheEntityAsync(int versionId, int propertyTypeId, CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + return await LoadBinaryCacheEntityAsync(versionId, propertyTypeId, ctx).ConfigureAwait(false); + } + public async Task LoadBinaryCacheEntityAsync(int versionId, int propertyTypeId, SnDataContext dataContext) + { + if (!(dataContext is PgSqlDataContext sqlCtx)) + throw new PlatformNotSupportedException(); + + using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: " + + "LoadBinaryCacheEntity(versionId: {0}, propertyTypeId: {1})", versionId, propertyTypeId); + var result = await sqlCtx.ExecuteReaderAsync(LoadBinaryCacheEntityScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + sqlCtx.CreateParameter("@MaxSize", DbType.Int32, BlobStorageOptions.BinaryCacheSize), + sqlCtx.CreateParameter("@VersionId", DbType.Int32, versionId), + sqlCtx.CreateParameter("@PropertyTypeId", DbType.Int32, propertyTypeId), + }); + }, async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + if (!reader.HasRows || !await reader.ReadAsync(cancel).ConfigureAwait(false)) + return null; + + var length = reader.GetInt64(0); + var binaryPropertyId = reader.GetInt32(1); + var fileId = reader.GetInt32(2); + + var providerName = reader.GetSafeString(3); + var providerTextData = 
reader.GetSafeString(4); + + byte[] rawData = null; + + var provider = Providers.GetProvider(providerName); + var context = new BlobStorageContext(provider, providerTextData) + { + VersionId = versionId, + PropertyTypeId = propertyTypeId, + FileId = fileId, + Length = length + }; + if (provider is IBuiltInBlobProvider) + { + context.BlobProviderData = new BuiltinBlobProviderData(); + if (!reader.IsDBNull(5)) + rawData = (byte[]) reader.GetValue(5); + } + + return new BinaryCacheEntity + { + Length = length, + RawData = rawData, + BinaryPropertyId = binaryPropertyId, + FileId = fileId, + Context = context + }; + }).ConfigureAwait(false); + op.Successful = true; + + return result; + } + + public async Task StartChunkAsync(IBlobProvider blobProvider, int versionId, int propertyTypeId, long fullSize, + CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: " + + "StartChunk(blobProvider: {0}, versionId: {1}, propertyTypeId: {2}, fullSize: {3})", + blobProvider, versionId, propertyTypeId, fullSize); + + var ctx = new BlobStorageContext(blobProvider) { VersionId = versionId, PropertyTypeId = propertyTypeId, FileId = 0, Length = fullSize }; + string blobProviderName = null; + string blobProviderData = null; + if (!(blobProvider is IBuiltInBlobProvider)) + { + await blobProvider.AllocateAsync(ctx, cancellationToken).ConfigureAwait(false); + blobProviderName = blobProvider.GetType().FullName; + blobProviderData = BlobStorageContext.SerializeBlobProviderData(ctx.BlobProviderData); + } + try + { + using var dctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + using var transaction = dctx.BeginTransaction(); + + // Step 1: Ensure the BinaryProperties row exists (lazy creation). 
+ await dctx.ExecuteNonQueryAsync(InsertStagingBinaryEnsureBinaryPropertyScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + dctx.CreateParameter("@VersionId", DbType.Int32, versionId), + dctx.CreateParameter("@PropertyTypeId", DbType.Int32, propertyTypeId), + }); + }).ConfigureAwait(false); + + // Step 2: Insert staging file and return BinaryPropertyId + FileId. + var result = await dctx.ExecuteReaderAsync(InsertStagingBinaryScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + dctx.CreateParameter("@VersionId", DbType.Int32, versionId), + dctx.CreateParameter("@PropertyTypeId", DbType.Int32, propertyTypeId), + dctx.CreateParameter("@Size", DbType.Int64, fullSize), + dctx.CreateParameter("@BlobProvider", DbType.String, 450, blobProviderName != null ? (object)blobProviderName : DBNull.Value), + dctx.CreateParameter("@BlobProviderData", DbType.String, int.MaxValue, blobProviderData != null ? (object)blobProviderData : DBNull.Value), + }); + }, async (reader, cancel) => + { + int binaryPropertyId; + int fileId; + cancel.ThrowIfCancellationRequested(); + if (await reader.ReadAsync(cancel).ConfigureAwait(false)) + { + binaryPropertyId = reader.GetSafeInt32(0); + fileId = reader.GetSafeInt32(1); + } + else + { + throw new DataException("File row could not be inserted."); + } + ctx.FileId = fileId; + + return new ChunkToken + { + VersionId = versionId, + PropertyTypeId = propertyTypeId, + BinaryPropertyId = binaryPropertyId, + FileId = fileId + }.GetToken(); + }).ConfigureAwait(false); + transaction.Commit(); + op.Successful = true; + return result; + } + catch (Exception ex) + { + throw new DataException("Error during saving binary chunk to PostgreSQL.", ex); + } + } + + public async System.Threading.Tasks.Task CommitChunkAsync(int versionId, int propertyTypeId, int fileId, long fullSize, BinaryDataValue source, + CancellationToken cancellationToken) + { + try + { + using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: CommitChunk: " + + 
"versionId: {0}, propertyTypeId: {1}, fileId: {2}, fullSize: {3}, contentType: {4}, fileName: {5}", + versionId, propertyTypeId, fileId, fullSize, source?.ContentType ?? "", source?.FileName ?? ""); + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + using var transaction = ctx.BeginTransaction(); + await ctx.ExecuteNonQueryAsync(CommitChunkScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@FileId", DbType.Int32, fileId), + ctx.CreateParameter("@VersionId", DbType.Int32, versionId), + ctx.CreateParameter("@PropertyTypeId", DbType.Int32, propertyTypeId), + ctx.CreateParameter("@Size", DbType.Int64, fullSize), + ctx.CreateParameter("@Checksum", DbType.AnsiString, 200, DBNull.Value), + ctx.CreateParameter("@ContentType", DbType.String, 50, source != null ? source.ContentType : string.Empty), + ctx.CreateParameter("@FileNameWithoutExtension", DbType.String, 450, source != null + ? source.FileName.FileNameWithoutExtension == null + ? DBNull.Value + : (object) source.FileName.FileNameWithoutExtension + : DBNull.Value), + ctx.CreateParameter("@Extension", DbType.String, 50, + source != null ? 
ValidateExtension(source.FileName.Extension) : string.Empty), + }); + }).ConfigureAwait(false); + transaction.Commit(); + op.Successful = true; + } + catch (Exception ex) + { + throw new DataException("Error during committing binary chunk.", ex); + } + } + + public System.Threading.Tasks.Task CleanupFilesSetDeleteFlagAsync(CancellationToken cancellationToken) + { + return CleanupFilesSetDeleteFlagAsync(CleanupFileSetIsDeletedScript, cancellationToken); + } + public System.Threading.Tasks.Task CleanupFilesSetDeleteFlagImmediatelyAsync(CancellationToken cancellationToken) + { + return CleanupFilesSetDeleteFlagAsync(CleanupFileSetIsDeletedImmediatelyScript, cancellationToken); + } + private async System.Threading.Tasks.Task CleanupFilesSetDeleteFlagAsync(string script, CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation(() => "PgSqlBlobMetaDataProvider: " + + $"CleanupFilesSetDeleteFlag: script: {script.ToTrace()}"); + + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + using var transaction = ctx.BeginTransaction(); + try + { + await ctx.ExecuteNonQueryAsync(script).ConfigureAwait(false); + transaction.Commit(); + } + catch (Exception e) + { + throw new DataException("Error during setting deleted flag on files.", e); + } + + op.Successful = true; + } + + public async Task CleanupFilesAsync(CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: CleanupFiles()"); + using var dctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var result = await dctx.ExecuteReaderAsync(CleanupFileScript, async (reader, cancel) => + { + try + { + var deleted = false; + var fileId = 0; + var size = 0L; + string providerName = null; + string providerData = null; + if (await reader.ReadAsync(cancel).ConfigureAwait(false)) + { + deleted = true; + fileId = 
reader.GetSafeInt32(reader.GetOrdinal("FileId")); + size = reader.GetSafeInt64(reader.GetOrdinal("Size")); + providerName = reader.GetSafeString(reader.GetOrdinal("BlobProvider")); + providerData = reader.GetSafeString(reader.GetOrdinal("BlobProviderData")); + } + + var provider = Providers.GetProvider(providerName); + var ctx = new BlobStorageContext(provider, providerData) { VersionId = 0, PropertyTypeId = 0, FileId = fileId, Length = size }; + + await ctx.Provider.DeleteAsync(ctx, cancel).ConfigureAwait(false); + + return deleted; + } + catch (Exception ex) + { + throw new DataException("Error during binary cleanup.", ex); + } + }).ConfigureAwait(false); + op.Successful = true; + + return result; + } + + private int _waitBetweenCleanupFilesMilliseconds = 0; + public async System.Threading.Tasks.Task CleanupAllFilesAsync(CancellationToken cancellationToken) + { + while (await CleanupFilesAsync(cancellationToken).ConfigureAwait(false)) + { + if(_waitBetweenCleanupFilesMilliseconds != 0) + await System.Threading.Tasks.Task.Delay(_waitBetweenCleanupFilesMilliseconds, cancellationToken).ConfigureAwait(false); + } + } + + public async Task GetFirstFileIdAsync(CancellationToken cancel) + { + int result; + var sql = GetFirstFileId; + cancel.ThrowIfCancellationRequested(); + using var op = SnTrace.Database.StartOperation("PgSqlBlobMetaDataProvider: GetFirstFileIdAsync()"); + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancel); + var scalar = await ctx.ExecuteScalarAsync(sql).ConfigureAwait(false); + if (scalar is int intValue) + result = intValue; + else + result = 0; + op.Successful = true; + return result; + } + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlBlobMetaDataProviderScripts.cs b/src/ContentRepository.PostgreSql/Data/PgSqlBlobMetaDataProviderScripts.cs new file mode 100644 index 000000000..76679579c --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlBlobMetaDataProviderScripts.cs @@ -0,0 
+1,219 @@ +// ReSharper disable once CheckNamespace +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient +{ + public partial class PgSqlBlobMetaDataProvider + { + #region GetBlobStorageContextScript + private const string GetBlobStorageContextScript = @"-- PgSqlBlobMetaDataProvider.GetBlobStorageContext +SELECT ""Size"", ""BlobProvider"", ""BlobProviderData"" +FROM ""Files"" WHERE ""FileId"" = @FileId +"; + #endregion + #region ClearStreamByFileIdScript + private const string ClearStreamByFileIdScript = @"-- PgSqlBlobMetaDataProvider.ClearStreamByFileId +UPDATE ""Files"" SET ""Stream"" = NULL WHERE ""FileId"" = @FileId; +"; + #endregion + + #region DeleteBinaryPropertyScript + internal const string DeleteBinaryPropertyScript = @"-- PgSqlBlobMetaDataProvider.DeleteBinaryProperty +DELETE FROM ""BinaryProperties"" WHERE ""VersionId"" = @VersionId AND ""PropertyTypeId"" = @PropertyTypeId; +"; + #endregion + #region InsertBinaryPropertyScript + private const string InsertBinaryPropertyScript = @"-- PgSqlBlobMetaDataProvider.InsertBinaryProperty +WITH inserted_file AS ( + INSERT INTO ""Files"" (""ContentType"", ""FileNameWithoutExtension"", ""Extension"", ""Size"", ""BlobProvider"", ""BlobProviderData"", ""Checksum"") + VALUES (@ContentType, @FileNameWithoutExtension, @Extension, @Size, @BlobProvider, @BlobProviderData, + CASE WHEN @Size = 0 THEN NULL ELSE @Checksum END) + RETURNING ""FileId"", ""Timestamp"" +), inserted_bp AS ( + INSERT INTO ""BinaryProperties"" (""VersionId"", ""PropertyTypeId"", ""FileId"") + SELECT @VersionId, @PropertyTypeId, f.""FileId"" FROM inserted_file f + RETURNING ""BinaryPropertyId"", ""FileId"" +) +SELECT bp.""BinaryPropertyId"", bp.""FileId"", f.""Timestamp"" +FROM inserted_bp bp +JOIN inserted_file f ON bp.""FileId"" = f.""FileId""; +"; + #endregion + #region DeleteAndInsertBinaryPropertyScript = DeleteBinaryPropertyScript + InsertBinaryPropertyScript + private const string DeleteAndInsertBinaryPropertyScript = 
DeleteBinaryPropertyScript + InsertBinaryPropertyScript; + #endregion + + #region InsertBinaryPropertyWithKnownFileIdScript + private const string InsertBinaryPropertyWithKnownFileIdScript = @"-- PgSqlBlobMetaDataProvider.InsertBinaryPropertyWithKnownFileId +INSERT INTO ""BinaryProperties"" + (""VersionId"", ""PropertyTypeId"", ""FileId"") VALUES (@VersionId, @PropertyTypeId, @FileId) +RETURNING ""BinaryPropertyId"" +"; + #endregion + #region DeleteAndInsertBinaryPropertyWithKnownFileIdScript + private const string DeleteAndInsertBinaryPropertyWithKnownFileIdScript = DeleteBinaryPropertyScript + InsertBinaryPropertyWithKnownFileIdScript; + #endregion + + #region UpdateBinaryPropertyNewFilerowScript + private const string UpdateBinaryPropertyNewFilerowScript = @"-- PgSqlBlobMetaDataProvider.UpdateBinaryPropertyNewFilerow +WITH inserted_file AS ( + INSERT INTO ""Files"" (""ContentType"", ""FileNameWithoutExtension"", ""Extension"", ""Size"", ""BlobProvider"", ""BlobProviderData"", ""Checksum"", ""Stream"") + VALUES (@ContentType, @FileNameWithoutExtension, @Extension, @Size, @BlobProvider, @BlobProviderData, + CASE WHEN @Size <= 0 THEN NULL ELSE @Checksum END, + CASE WHEN @Size <= 0 THEN NULL ELSE ''::bytea END) + RETURNING ""FileId"" +) +UPDATE ""BinaryProperties"" SET ""FileId"" = f.""FileId"" +FROM inserted_file f +WHERE ""BinaryPropertyId"" = @BinaryPropertyId +RETURNING f.""FileId""; +"; + #endregion + #region UpdateBinaryPropertyScript + // Check whether an existing built-in binary needs a new Files row. + // Returns TRUE when the file row must be replaced (shared FileId or blob-provider mismatch). 
+ private const string UpdateBinaryPropertyNeedNewFileRowScript = @"-- PgSqlBlobMetaDataProvider.UpdateBinaryPropertyNeedNewFileRow +SELECT + CASE + WHEN (@BlobProvider IS NULL) AND EXISTS ( + SELECT 1 FROM ""Files"" f + JOIN ""BinaryProperties"" bp ON bp.""FileId"" = f.""FileId"" + WHERE bp.""BinaryPropertyId"" = @BinaryPropertyId AND f.""BlobProvider"" IS NOT NULL + ) THEN TRUE + WHEN EXISTS ( + SELECT 1 FROM ""BinaryProperties"" bp1 + JOIN ""BinaryProperties"" bp2 ON bp1.""FileId"" = bp2.""FileId"" + WHERE bp1.""BinaryPropertyId"" = @BinaryPropertyId AND bp2.""BinaryPropertyId"" != @BinaryPropertyId + ) THEN TRUE + ELSE FALSE + END AS ""NeedNewRow""; +"; + + // Update an existing Files row in-place (no new row needed). + private const string UpdateBinaryPropertyInPlaceScript = @"-- PgSqlBlobMetaDataProvider.UpdateBinaryPropertyInPlace +UPDATE ""Files"" f +SET ""ContentType"" = @ContentType, + ""FileNameWithoutExtension"" = @FileNameWithoutExtension, + ""Extension"" = @Extension, + ""Size"" = @Size, + ""BlobProvider"" = @BlobProvider, + ""BlobProviderData"" = @BlobProviderData, + ""Checksum"" = CASE WHEN @Size <= 0 THEN NULL ELSE @Checksum END, + ""Stream"" = CASE WHEN @Size <= 0 THEN NULL ELSE ''::bytea END +FROM ""BinaryProperties"" bp +WHERE bp.""BinaryPropertyId"" = @BinaryPropertyId AND f.""FileId"" = bp.""FileId"" +RETURNING f.""FileId""; +"; + #endregion + + #region DeleteBinaryPropertiesScript + private const string DeleteBinaryPropertiesScript = @"-- PgSqlBlobMetaDataProvider.DeleteBinaryProperties +DELETE FROM ""BinaryProperties"" WHERE ""VersionId"" = ANY(string_to_array(@VersionIds, ',')::int[]) +"; + #endregion + + #region LoadBinaryPropertyScript + private const string LoadBinaryPropertyScript = @"-- PgSqlBlobMetaDataProvider.LoadBinaryProperty +SELECT B.""BinaryPropertyId"", B.""VersionId"", B.""PropertyTypeId"", F.""FileId"", F.""ContentType"", F.""FileNameWithoutExtension"", + F.""Extension"", F.""Size"", F.""Checksum"", NULL AS ""Stream"", 0 AS 
""Loaded"", F.""Timestamp"", F.""BlobProvider"", F.""BlobProviderData"" +FROM ""BinaryProperties"" B + JOIN ""Files"" F ON B.""FileId"" = F.""FileId"" +WHERE ""VersionId"" = @VersionId AND ""PropertyTypeId"" = @PropertyTypeId AND ""Staging"" IS NULL +"; + #endregion + + #region LoadBinaryCacheEntityScript + private const string LoadBinaryCacheEntityScript = @"-- PgSqlBlobMetaDataProvider.LoadBinaryCacheEntity +SELECT F.""Size"", B.""BinaryPropertyId"", F.""FileId"", F.""BlobProvider"", F.""BlobProviderData"", + CASE WHEN F.""Size"" < @MaxSize THEN F.""Stream"" ELSE null END AS ""Stream"" +FROM ""BinaryProperties"" B + JOIN ""Files"" F ON B.""FileId"" = F.""FileId"" +WHERE B.""VersionId"" = @VersionId AND B.""PropertyTypeId"" = @PropertyTypeId AND F.""Staging"" IS NULL +"; + #endregion + + #region InsertStagingBinaryScript + // Two-step approach: first ensure the BinaryProperties row exists, then insert staging file. + // Cannot use DO $$ blocks because PostgreSQL anonymous blocks do not support parameter binding. 
+ private const string InsertStagingBinaryEnsureBinaryPropertyScript = @"-- PgSqlBlobMetaDataProvider.InsertStagingBinaryEnsureBinaryProperty +INSERT INTO ""BinaryProperties"" (""VersionId"", ""PropertyTypeId"", ""FileId"") +SELECT @VersionId, @PropertyTypeId, 0 +WHERE NOT EXISTS ( + SELECT 1 FROM ""BinaryProperties"" + WHERE ""VersionId"" = @VersionId AND ""PropertyTypeId"" = @PropertyTypeId +); +"; + + private const string InsertStagingBinaryScript = @"-- PgSqlBlobMetaDataProvider.InsertStagingBinary +WITH existing_meta AS ( + SELECT F.""ContentType"", F.""FileNameWithoutExtension"", F.""Extension"" + FROM ""BinaryProperties"" B + JOIN ""Files"" F ON B.""FileId"" = F.""FileId"" + WHERE B.""VersionId"" = @VersionId AND B.""PropertyTypeId"" = @PropertyTypeId + LIMIT 1 +), +defaults AS ( + SELECT + COALESCE((SELECT ""ContentType"" FROM existing_meta), '') AS ""ContentType"", + COALESCE((SELECT ""FileNameWithoutExtension"" FROM existing_meta), '') AS ""FileNameWithoutExtension"", + COALESCE((SELECT ""Extension"" FROM existing_meta), '') AS ""Extension"" +), +inserted_file AS ( + INSERT INTO ""Files"" (""ContentType"", ""FileNameWithoutExtension"", ""Extension"", ""Size"", ""Checksum"", + ""CreationDate"", ""Staging"", ""StagingVersionId"", ""StagingPropertyTypeId"", ""BlobProvider"", ""BlobProviderData"") + SELECT d.""ContentType"", d.""FileNameWithoutExtension"", d.""Extension"", @Size, NULL, + NOW() AT TIME ZONE 'UTC', TRUE, @VersionId, @PropertyTypeId, @BlobProvider, @BlobProviderData + FROM defaults d + RETURNING ""FileId"" +) +SELECT bp.""BinaryPropertyId"", f.""FileId"" +FROM inserted_file f +CROSS JOIN ""BinaryProperties"" bp +WHERE bp.""VersionId"" = @VersionId AND bp.""PropertyTypeId"" = @PropertyTypeId; +"; + #endregion + + #region UpdateStreamWriteChunkSecurityCheckScript + // Plain SQL security check — uses 1/0 division to raise an error when the staging row is not found. 
+ // Cannot use DO $$ blocks because PostgreSQL anonymous blocks do not support parameter binding. + internal static readonly string UpdateStreamWriteChunkSecurityCheckScript = @"-- PgSqlBlobMetaDataProvider.UpdateStreamWriteChunkSecurityCheck +SELECT 1 / (SELECT COUNT(*)::int FROM ""Files"" + WHERE ""FileId"" = @FileId AND ""StagingVersionId"" = @VersionId AND ""StagingPropertyTypeId"" = @PropertyTypeId); +"; + #endregion + #region CommitChunkScript + private static readonly string CommitChunkScript = UpdateStreamWriteChunkSecurityCheckScript + +@"-- PgSqlBlobMetaDataProvider.CommitChunk +UPDATE ""Files"" SET ""Size"" = @Size, ""Checksum"" = @Checksum, ""ContentType"" = @ContentType, + ""FileNameWithoutExtension"" = @FileNameWithoutExtension, ""Extension"" = @Extension, + ""Staging"" = NULL, ""StagingVersionId"" = NULL, ""StagingPropertyTypeId"" = NULL +WHERE ""FileId"" = @FileId; +UPDATE ""BinaryProperties"" SET ""FileId"" = @FileId +WHERE ""VersionId"" = @VersionId AND ""PropertyTypeId"" = @PropertyTypeId;"; + #endregion + + #region CleanupFileSetIsdeletedScript + private const string CleanupFileSetIsDeletedScript = @"-- PgSqlBlobMetaDataProvider.CleanupFileSetIsDeleted +UPDATE ""Files"" SET ""IsDeleted"" = TRUE +WHERE ""Staging"" IS NULL AND ""CreationDate"" < (NOW() AT TIME ZONE 'UTC') - INTERVAL '30 minutes' + AND ""FileId"" NOT IN (SELECT ""FileId"" FROM ""BinaryProperties"") +"; + #endregion + #region CleanupFileSetIsdeletedImmediatelyScript + private const string CleanupFileSetIsDeletedImmediatelyScript = @"-- PgSqlBlobMetaDataProvider.CleanupFileSetIsDeletedImmediately +UPDATE ""Files"" SET ""IsDeleted"" = TRUE +WHERE ""Staging"" IS NULL AND ""FileId"" NOT IN (SELECT ""FileId"" FROM ""BinaryProperties"") +"; + #endregion + + #region CleanupFileScript + private const string CleanupFileScript = @"-- PgSqlBlobMetaDataProvider.CleanupFile +DELETE FROM ""Files"" +WHERE ctid = (SELECT ctid FROM ""Files"" WHERE ""IsDeleted"" = TRUE LIMIT 1) +RETURNING 
""FileId"", ""Size"", ""BlobProvider"", ""BlobProviderData"" +"; + #endregion + + public string GetFirstFileId = @"-- PgSqlBlobMetaDataProvider.GetFirstFileId +SELECT ""FileId"" FROM ""Files"" LIMIT 1 +"; + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlBuiltInBlobProvider.cs b/src/ContentRepository.PostgreSql/Data/PgSqlBuiltInBlobProvider.cs new file mode 100644 index 000000000..6d9e6ae15 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlBuiltInBlobProvider.cs @@ -0,0 +1,227 @@ +using System; +using System.Data; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +using Microsoft.Extensions.Options; +using Npgsql; +using NpgsqlTypes; +using SenseNet.Configuration; +using SenseNet.ContentRepository.Storage.Data.MsSqlClient; +using SenseNet.Diagnostics; +using SenseNet.Tools; +// ReSharper disable AccessToDisposedClosure +// ReSharper disable AccessToModifiedClosure + +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient +{ + /// + /// The built-in provider for PostgreSQL is responsible for saving bytes directly + /// to the Files table (bytea column). + /// + public class PgSqlBuiltInBlobProvider : IBuiltInBlobProvider + { + private readonly IRetrier _retrier; + + protected DataOptions DataOptions { get; } + private string _connectionString; + + public IBlobStorage BlobStorage { get; set; } + + public PgSqlBuiltInBlobProvider(IOptions options, IOptions connectionOptions, IRetrier retrier) + { + _retrier = retrier; + DataOptions = options?.Value ?? new DataOptions(); + _connectionString = connectionOptions?.Value.Repository; + } + + /// + public object ParseData(string providerData) + { + return BlobStorageContext.DeserializeBlobProviderData(providerData); + } + + /// + /// Throws NotSupportedException. Our algorithms do not use this method of this type. 
+ /// + public System.Threading.Tasks.Task AllocateAsync(BlobStorageContext context, CancellationToken cancellationToken) + { + throw new NotSupportedException(); + } + + private static readonly string WriteStreamScript = @"-- PgSqlBuiltInBlobProvider.WriteStream +UPDATE ""Files"" SET ""Stream"" = @Value WHERE ""FileId"" = @Id;"; + + /// + /// DO NOT USE DIRECTLY THIS METHOD FROM YOUR CODE. + /// + public void AddStream(BlobStorageContext context, Stream stream) + { + if (stream == null || stream.Length == 0L) + return; + UpdateStream(context, stream); + } + public static System.Threading.Tasks.Task AddStreamAsync(BlobStorageContext context, Stream stream, PgSqlDataContext dataContext) + { + if (stream == null || stream.Length == 0L) + return System.Threading.Tasks.Task.CompletedTask; + return UpdateStreamAsync(context, stream, dataContext); + } + + /// + /// DO NOT USE DIRECTLY THIS METHOD FROM YOUR CODE. + /// + public void UpdateStream(BlobStorageContext context, Stream stream) + { + var bufferSize = Convert.ToInt32(stream.Length); + + using (var op = SnTrace.Database.StartOperation("PgSqlBuiltInBlobProvider: " + + "UpdateStream: FileId: {0}, length: {1}.", context.FileId, bufferSize)) + { + var buffer = new byte[bufferSize]; + if (bufferSize > 0) + { + stream.Seek(0, SeekOrigin.Begin); + stream.Read(buffer, 0, bufferSize); + } + + using (var ctx = new PgSqlDataContext(_connectionString, DataOptions, _retrier, CancellationToken.None)) + { + ctx.ExecuteNonQueryAsync(WriteStreamScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@Id", DbType.Int32, context.FileId), + ctx.CreateParameter("@Value", NpgsqlDbType.Bytea, bufferSize, buffer), + }); + }).GetAwaiter().GetResult(); + } + op.Successful = true; + } + } + public static async System.Threading.Tasks.Task UpdateStreamAsync(BlobStorageContext context, Stream stream, PgSqlDataContext dataContext) + { + var bufferSize = Convert.ToInt32(stream.Length); + + using (var op = 
SnTrace.Database.StartOperation("PgSqlBuiltInBlobProvider: " + + "UpdateStreamAsync: FileId: {0}, length: {1}.", context.FileId, bufferSize)) + { + var buffer = new byte[bufferSize]; + if (bufferSize > 0) + { + stream.Seek(0, SeekOrigin.Begin); + stream.Read(buffer, 0, bufferSize); + } + + await dataContext.ExecuteNonQueryAsync(WriteStreamScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + dataContext.CreateParameter("@Id", DbType.Int32, context.FileId), + dataContext.CreateParameter("@Value", NpgsqlDbType.Bytea, bufferSize, buffer), + }); + }).ConfigureAwait(false); + op.Successful = true; + } + } + + /// + public System.Threading.Tasks.Task ClearAsync(BlobStorageContext context, CancellationToken cancellationToken) + { + return System.Threading.Tasks.Task.CompletedTask; + } + + /// + public Stream GetStreamForRead(BlobStorageContext context) + { + if (BlobStorage == null) + throw new InvalidOperationException("BlobStorage back reference is not set."); + + return new RepositoryStream(context.FileId, context.Length, BlobStorage); + } + + /// + public Stream CloneStream(BlobStorageContext context, Stream stream) + { + if (stream == null) + throw new ArgumentNullException(nameof(stream)); + if (BlobStorage == null) + throw new InvalidOperationException("BlobStorage back reference is not set."); + + if (stream is RepositoryStream repoStream) + return new RepositoryStream(repoStream.FileId, repoStream.Length, BlobStorage); + + throw new InvalidOperationException("Unknown stream type: " + stream.GetType().Name); + } + + /// + public System.Threading.Tasks.Task DeleteAsync(BlobStorageContext context, CancellationToken cancellationToken) + { + return System.Threading.Tasks.Task.CompletedTask; + } + + #region LoadBinaryFragmentScript + private const string LoadBinaryFragmentScript = @"SELECT SUBSTRING(""Stream"" FROM @Position::int FOR @Count) FROM ""Files"" WHERE ""FileId"" = @FileId"; + #endregion + public byte[] ReadRandom(BlobStorageContext context, long offset, 
int count) + { + using var op = SnTrace.Database.StartOperation("PgSqlBuiltInBlobProvider: " + + "ReadRandom: FileId: {0}, offset: {1}, count: {2}", context.FileId, offset, count); + using var ctx = new PgSqlDataContext(_connectionString, DataOptions, _retrier, CancellationToken.None); + var result = (byte[])ctx.ExecuteScalarAsync(LoadBinaryFragmentScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@FileId", DbType.Int32, context.FileId), + ctx.CreateParameter("@Position", DbType.Int64, offset + 1), // PostgreSQL SUBSTRING is 1-based + ctx.CreateParameter("@Count", DbType.Int32, count), + }); + }).GetAwaiter().GetResult(); + op.Successful = true; + + return result; + } + + #region UpdateStreamWriteChunkScript + private static readonly string UpdateStreamWriteChunkScript = PgSqlBlobMetaDataProvider.UpdateStreamWriteChunkSecurityCheckScript + @" +-- init for streaming write +UPDATE ""Files"" SET ""Stream"" = ''::bytea WHERE ""FileId"" = @FileId AND ""Stream"" IS NULL; +-- fill to offset if needed +UPDATE ""Files"" SET ""Stream"" = ""Stream"" || repeat(E'\\000', greatest(0, @Offset::int - octet_length(""Stream"")))::bytea + WHERE ""FileId"" = @FileId AND octet_length(""Stream"") < @Offset; +-- write payload using overlay +UPDATE ""Files"" SET ""Stream"" = overlay(""Stream"" placing @Data from (@Offset + 1)::int for octet_length(@Data)) + WHERE ""FileId"" = @FileId;"; + #endregion + + /// + public async System.Threading.Tasks.Task WriteAsync(BlobStorageContext context, long offset, byte[] buffer, CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlBuiltInBlobProvider: " + + "WriteAsync: FileId: {0}, VersionId: {1}, PropertyTypeId: {2}, offset: {3}, buffer length: {4}", + context.FileId, context.VersionId, context.PropertyTypeId, offset, buffer.Length); + using var ctx = new PgSqlDataContext(_connectionString, DataOptions, _retrier, cancellationToken); + await 
ctx.ExecuteNonQueryAsync(UpdateStreamWriteChunkScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@FileId", DbType.Int32, context.FileId), + ctx.CreateParameter("@VersionId", DbType.Int32, context.VersionId), + ctx.CreateParameter("@PropertyTypeId", DbType.Int32, context.PropertyTypeId), + ctx.CreateParameter("@Data", NpgsqlDbType.Bytea, buffer.Length, buffer), + ctx.CreateParameter("@Offset", DbType.Int64, offset), + }); + }).ConfigureAwait(false); + op.Successful = true; + } + + /// + /// Throws NotSupportedException. + /// + public Stream GetStreamForWrite(BlobStorageContext context) + { + throw new NotSupportedException(); + } + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlClientStoreDataProvider.cs b/src/ContentRepository.PostgreSql/Data/PgSqlClientStoreDataProvider.cs new file mode 100644 index 000000000..7646d0900 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlClientStoreDataProvider.cs @@ -0,0 +1,309 @@ +using System; +using System.Collections.Generic; +using System.Data; +using System.Data.Common; +using System.Linq; +using System.Threading; +using Tasks=System.Threading.Tasks; +using Microsoft.Extensions.Options; +using SenseNet.Configuration; +using SenseNet.ContentRepository.Storage.Data; +using SenseNet.ContentRepository.Storage.Data.PgSqlClient; +using SenseNet.Diagnostics; +using SenseNet.Tools; +// ReSharper disable AccessToDisposedClosure + +namespace SenseNet.ContentRepository.Security.Clients +{ + public class PgSqlClientStoreDataProvider : IClientStoreDataProvider + { + private readonly IRetrier _retrier; + private DataOptions DataOptions { get; } + private ConnectionStringOptions ConnectionStrings { get; } + + public PgSqlClientStoreDataProvider(IOptions dataOptions, + IOptions connectionOptions, IRetrier retrier) + { + _retrier = retrier; + DataOptions = dataOptions?.Value ?? new DataOptions(); + ConnectionStrings = connectionOptions?.Value ?? 
new ConnectionStringOptions(); + } + + /* =============================================================================================== LOAD */ + + private static readonly string LoadClientsByRepositorySql = @"-- PgSqlClientStoreDataProvider.LoadClientsByRepository +SELECT * FROM ""ClientApps"" WHERE ""Repository"" = @Repository; +SELECT S.* FROM ""ClientSecrets"" S JOIN ""ClientApps"" A ON S.""ClientId"" = A.""ClientId"" WHERE A.""Repository"" = @Repository; +"; + + public async Tasks.Task LoadClientsByRepositoryAsync(string repositoryHost, + CancellationToken cancellation) + { + using var op = SnTrace.Database.StartOperation("PgSqlClientStoreDataProvider: " + + "LoadClientsByRepository(repositoryHost: {0})", repositoryHost); + + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellation); + var result = await ctx.ExecuteReaderAsync(LoadClientsByRepositorySql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@Repository", DbType.String, 450, repositoryHost)); + }, + async (reader, cancel) => await GetClientsFromReader(reader, cancel)).ConfigureAwait(false); + op.Successful = true; + + return result; + } + + private static readonly string LoadClientsByAuthoritySql = @"-- PgSqlClientStoreDataProvider.LoadClientsByAuthority +SELECT * FROM ""ClientApps"" WHERE ""Authority"" = @Authority; +SELECT S.* FROM ""ClientSecrets"" S JOIN ""ClientApps"" A ON S.""ClientId"" = A.""ClientId"" WHERE A.""Authority"" = @Authority; +"; + + public async Tasks.Task LoadClientsByAuthorityAsync(string authority, + CancellationToken cancellation) + { + using var op = SnTrace.Database.StartOperation("PgSqlClientStoreDataProvider: " + + "LoadClientsByAuthority(authority: {0})", authority); + + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellation); + var result = await ctx.ExecuteReaderAsync(LoadClientsByAuthoritySql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@Authority", 
DbType.String, 450, authority)); + }, + async (reader, cancel) => await GetClientsFromReader(reader, cancel)).ConfigureAwait(false); + op.Successful = true; + + return result; + } + + private async Tasks.Task GetClientsFromReader(DbDataReader reader, CancellationToken cancel) + { + var clients = new List(); + while (await reader.ReadAsync(cancel).ConfigureAwait(false)) + { + cancel.ThrowIfCancellationRequested(); + clients.Add(new Client + { + ClientId = reader.GetSafeString(reader.GetOrdinal("ClientId")), + Name = reader.GetSafeString(reader.GetOrdinal("Name")), + Repository = reader.GetSafeString(reader.GetOrdinal("Repository")), + UserName = reader.GetSafeString(reader.GetOrdinal("UserName")), + Authority = reader.GetSafeString(reader.GetOrdinal("Authority")), + Type = (ClientType)reader.GetInt32(reader.GetOrdinal("Type")), + }); + } + await reader.NextResultAsync(cancel); + while (await reader.ReadAsync(cancel).ConfigureAwait(false)) + { + cancel.ThrowIfCancellationRequested(); + var clientId = reader.GetString(reader.GetOrdinal("ClientId")); + var client = clients.First(x => x.ClientId == clientId); + client.Secrets.Add(new ClientSecret + { + Id = reader.GetString(reader.GetOrdinal("Id")), + Value = reader.GetString(reader.GetOrdinal("Value")), + CreationDate = reader.GetDateTime(reader.GetOrdinal("CreationDate")), + ValidTill = reader.GetDateTime(reader.GetOrdinal("ValidTill")), + }); + } + + return clients.ToArray(); + } + + /* =============================================================================================== SAVE */ + + private static readonly string UpsertClientSql = @"-- PgSqlClientStoreDataProvider.SaveClient +INSERT INTO ""ClientApps"" (""ClientId"", ""Name"", ""Repository"", ""UserName"", ""Authority"", ""Type"") +VALUES (@ClientId, @Name, @Repository, @UserName, @Authority, @Type) +ON CONFLICT (""ClientId"") DO UPDATE SET + ""Name"" = EXCLUDED.""Name"", ""Repository"" = EXCLUDED.""Repository"", + ""UserName"" = EXCLUDED.""UserName"", 
""Authority"" = EXCLUDED.""Authority"", + ""Type"" = EXCLUDED.""Type"" +"; + + private static readonly string DeleteSecretsByClientSql = @"-- PgSqlClientStoreDataProvider.DeleteSecrets +DELETE FROM ""ClientSecrets"" WHERE ""ClientId"" = @ClientId +"; + + public async Tasks.Task SaveClientAsync(Client client, CancellationToken cancellation) + { + using var op = SnTrace.Database.StartOperation("PgSqlClientStoreDataProvider: " + + "SaveClient: ClientId/Name: {0}, Repository: {1}, UserName: {2}, Authority: {3}, Type: {4}({5})", + client?.ClientId, client?.Repository, client?.UserName, + client?.Authority, client?.Type, (int)(client?.Type ?? 0)); + + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellation); + using var transaction = ctx.BeginTransaction(); + + // UPSERT CLIENT + await ctx.ExecuteNonQueryAsync(UpsertClientSql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@ClientId", DbType.AnsiString, 50, client.ClientId)); + cmd.Parameters.Add(ctx.CreateParameter("@Name", DbType.String, 450, client.Name ?? client.ClientId)); + cmd.Parameters.Add(ctx.CreateParameter("@Repository", DbType.String, 450, client.Repository)); + cmd.Parameters.Add(ctx.CreateParameter("@UserName", DbType.String, 450, + (object)client.UserName ?? 
DBNull.Value)); + cmd.Parameters.Add(ctx.CreateParameter("@Authority", DbType.String, 450, client.Authority)); + cmd.Parameters.Add(ctx.CreateParameter("@Type", DbType.Int32, (int)client.Type)); + }).ConfigureAwait(false); + + // DELETE ALL RELATED SECRETS + await ctx.ExecuteNonQueryAsync(DeleteSecretsByClientSql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@ClientId", DbType.AnsiString, 50, client.ClientId)); + }).ConfigureAwait(false); + + // INSERT SECRETS + foreach (var secret in client.Secrets) + await SaveSecretInternalAsync(client.ClientId, secret, false, ctx); + + transaction.Commit(); + op.Successful = true; + } + + public async Tasks.Task SaveSecretAsync(string clientId, ClientSecret secret, + CancellationToken cancellation) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellation); + await SaveSecretInternalAsync(clientId, secret, true, ctx); + } + + private async Tasks.Task SaveSecretInternalAsync(string clientId, ClientSecret secret, + bool deleteBefore, PgSqlDataContext ctx) + { + var sql = deleteBefore + ? 
@"-- PgSqlClientStoreDataProvider.SaveSecret (delete and insert) +DELETE FROM ""ClientSecrets"" WHERE ""Id"" = @Id; +INSERT INTO ""ClientSecrets"" (""Id"", ""ClientId"", ""Value"", ""CreationDate"", ""ValidTill"") +VALUES (@Id, @ClientId, @Value, @CreationDate, @ValidTill) +" + : @"-- PgSqlClientStoreDataProvider.SaveSecret (insert only) +INSERT INTO ""ClientSecrets"" (""Id"", ""ClientId"", ""Value"", ""CreationDate"", ""ValidTill"") +VALUES (@Id, @ClientId, @Value, @CreationDate, @ValidTill) +"; + using var op = SnTrace.Database.StartOperation("PgSqlClientStoreDataProvider: " + + "SaveSecret: ClientId: {0}, Id: {1}, Value: {2}," + + " CreationDate: {3:yyyy-MM-dd HH:mm:ss.fffff}, ValidTill: {4:yyyy-MM-dd HH:mm:ss.fffff}", + clientId, secret?.Id, secret?.Value, secret?.CreationDate, secret?.ValidTill); + + await ctx.ExecuteNonQueryAsync(sql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@ClientId", DbType.AnsiString, 50, clientId)); + cmd.Parameters.Add(ctx.CreateParameter("@Id", DbType.String, 450, secret.Id)); + cmd.Parameters.Add(ctx.CreateParameter("@Value", DbType.String, 450, secret.Value)); + cmd.Parameters.Add(ctx.CreateParameter("@CreationDate", DbType.DateTime2, secret.CreationDate)); + cmd.Parameters.Add(ctx.CreateParameter("@ValidTill", DbType.DateTime2, secret.ValidTill)); + }).ConfigureAwait(false); + + op.Successful = true; + } + + /* =============================================================================================== DELETE */ + + private static readonly string DeleteClientSql = @"-- PgSqlClientStoreDataProvider.DeleteClient +DELETE FROM ""ClientApps"" WHERE ""ClientId"" = @ClientId +"; + + public async Tasks.Task DeleteClientAsync(string clientId, CancellationToken cancellation) + { + using var op = SnTrace.Database.StartOperation("PgSqlClientStoreDataProvider: " + + "DeleteClient(clientId: {0})", clientId); + + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellation); + using var 
transaction = ctx.BeginTransaction(); + + // DELETE ALL RELATED SECRETS + await ctx.ExecuteNonQueryAsync(DeleteSecretsByClientSql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@ClientId", DbType.AnsiString, 50, clientId)); + }).ConfigureAwait(false); + + await ctx.ExecuteNonQueryAsync(DeleteClientSql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@ClientId", DbType.AnsiString, 50, clientId)); + }).ConfigureAwait(false); + + transaction.Commit(); + op.Successful = true; + } + + private static readonly string DeleteSecretByHostSql = @"-- PgSqlClientStoreDataProvider.DeleteSecretByHost +DELETE FROM ""ClientSecrets"" WHERE ""ClientId"" IN (SELECT ""ClientId"" FROM ""ClientApps"" WHERE ""Repository"" = @Repository) +"; + private static readonly string DeleteClientByHostSql = @"-- PgSqlClientStoreDataProvider.DeleteClientByHost +DELETE FROM ""ClientApps"" WHERE ""Repository"" = @Repository +"; + + public async Tasks.Task DeleteClientByRepositoryHostAsync(string repositoryHost, + CancellationToken cancellation) + { + using var op = SnTrace.Database.StartOperation("PgSqlClientStoreDataProvider: " + + "DeleteClientByRepositoryHost(repositoryHost: {0})", repositoryHost); + + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellation); + using var transaction = ctx.BeginTransaction(); + + // DELETE ALL RELATED SECRETS + await ctx.ExecuteNonQueryAsync(DeleteSecretByHostSql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@Repository", DbType.String, 450, repositoryHost)); + }).ConfigureAwait(false); + + await ctx.ExecuteNonQueryAsync(DeleteClientByHostSql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@Repository", DbType.String, 450, repositoryHost)); + }).ConfigureAwait(false); + + transaction.Commit(); + op.Successful = true; + } + + private static readonly string DeleteSecretSql = @"-- PgSqlClientStoreDataProvider.DeleteSecret +DELETE FROM ""ClientSecrets"" WHERE ""ClientId"" = @ClientId AND ""Id"" = 
@SecretId +"; + + public async Tasks.Task DeleteSecretAsync(string clientId, string secretId, + CancellationToken cancellation) + { + using var op = SnTrace.Database.StartOperation("PgSqlClientStoreDataProvider: " + + "DeleteSecret(clientId: {0}, secretId: {1})", clientId, secretId); + + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellation); + await ctx.ExecuteNonQueryAsync(DeleteSecretSql, cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@ClientId", DbType.AnsiString, 50, clientId)); + cmd.Parameters.Add(ctx.CreateParameter("@SecretId", DbType.AnsiString, 50, secretId)); + }).ConfigureAwait(false); + + op.Successful = true; + } + + // =============================================================================================== Installation + + public static readonly string DropAndCreateTablesSql = @"-- PgSqlClientStoreDataProvider.CreateTables +DROP TABLE IF EXISTS ""ClientSecrets"" CASCADE; +DROP TABLE IF EXISTS ""ClientApps"" CASCADE; + +CREATE TABLE IF NOT EXISTS ""ClientApps"" ( + ""ClientId"" VARCHAR(50) NOT NULL PRIMARY KEY, + ""Name"" VARCHAR(450) NULL, + ""Repository"" VARCHAR(450) NULL, + ""UserName"" VARCHAR(450) NULL, + ""Authority"" VARCHAR(450) NULL, + ""Type"" INT NULL +); + +CREATE INDEX IF NOT EXISTS ""IX_ClientApps_Repository"" ON ""ClientApps"" (""Repository""); +CREATE INDEX IF NOT EXISTS ""IX_ClientApps_Authority"" ON ""ClientApps"" (""Authority""); + +CREATE TABLE IF NOT EXISTS ""ClientSecrets"" ( + ""Id"" VARCHAR(50) NOT NULL PRIMARY KEY, + ""ClientId"" VARCHAR(50) NOT NULL REFERENCES ""ClientApps"" (""ClientId""), + ""Value"" VARCHAR(450) NOT NULL, + ""CreationDate"" TIMESTAMP WITHOUT TIME ZONE NOT NULL, + ""ValidTill"" TIMESTAMP WITHOUT TIME ZONE NOT NULL +); + +CREATE INDEX IF NOT EXISTS ""IX_ClientSecrets_ClientId"" ON ""ClientSecrets"" (""ClientId""); +"; + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlDataContext.cs 
// ============================================================================
// File: src/ContentRepository.PostgreSql/Data/PgSqlDataContext.cs
// ============================================================================

using System;
using System.Collections.Concurrent;
using System.Data;
using System.Data.Common;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Options;
using Npgsql;
using SenseNet.Configuration;
using SenseNet.ContentRepository.Storage.Data;
using SenseNet.Tools;

// ReSharper disable once CheckNamespace
namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient
{
    /// <summary>
    /// PostgreSQL implementation of SnDataContext. Provides connection, command and
    /// parameter factory methods using Npgsql.
    /// </summary>
    public class PgSqlDataContext : SnDataContext
    {
        // One NpgsqlDataSource per distinct connection string, shared process-wide
        // and kept for the lifetime of the process.
        private static readonly ConcurrentDictionary<string, NpgsqlDataSource> DataSources = new();

        private readonly string _connectionString;

        public PgSqlDataContext(string connectionString, DataOptions options, IRetrier retrier,
            CancellationToken cancellationToken = default)
            : base(options, retrier, cancellationToken)
        {
            _connectionString = connectionString
                                ?? throw new ArgumentNullException(nameof(connectionString));
        }

        private static NpgsqlDataSource GetOrCreateDataSource(string connectionString) =>
            DataSources.GetOrAdd(connectionString, cs =>
            {
                var sourceBuilder = new NpgsqlDataSourceBuilder(cs);
                sourceBuilder.EnableDynamicJson();
                return sourceBuilder.Build();
            });

        public override DbConnection CreateConnection() =>
            GetOrCreateDataSource(_connectionString).CreateConnection();

        public override DbCommand CreateCommand() => new NpgsqlCommand();

        public override DbParameter CreateParameter() => new NpgsqlParameter();

        /// <summary>
        /// Overrides base CreateParameter to handle Npgsql strict type checking:
        /// Npgsql requires the CLR value type to match the NpgsqlDbType exactly.
        /// </summary>
        public override DbParameter CreateParameter(string name, DbType dbType, object value)
        {
            // PostgreSQL timestamps are BIGINT, not binary rowversion.
            // ConvertInt64ToTimestamp returns long on PgSql, but the base class passes DbType.Binary.
            if (dbType == DbType.Binary && value is long longValue)
                return CreateBigintParameter(name, longValue);

            return base.CreateParameter(name, dbType, CoerceParameterValue(dbType, value));
        }

        /// <summary>
        /// Overrides base CreateParameter (sized variant) to handle Npgsql strict type checking.
        /// </summary>
        public override DbParameter CreateParameter(string name, DbType dbType, int size, object value)
        {
            if (dbType == DbType.Binary && value is long longValue)
                return CreateBigintParameter(name, longValue);

            return base.CreateParameter(name, dbType, size, CoerceParameterValue(dbType, value));
        }

        // Shared by both DbType-based overloads: maps a long "timestamp" value
        // to an explicit BIGINT parameter.
        private static NpgsqlParameter CreateBigintParameter(string name, long value) =>
            new NpgsqlParameter
            {
                ParameterName = name,
                NpgsqlDbType = NpgsqlTypes.NpgsqlDbType.Bigint,
                Value = value
            };

        /// <summary>
        /// Coerces parameter values to match the expected CLR type for the given DbType.
        /// Npgsql (unlike SqlClient) strictly enforces type matching between DbType and CLR value type:
        /// DbType.Byte maps to NpgsqlDbType.Smallint (int2) and expects short, not byte
        /// (e.g. a C# ternary such as (byte)1 : 0 produces int); DbType.Int16 likewise
        /// needs short while callers may pass int (e.g. Version.Major/Minor).
        /// </summary>
        private static object CoerceParameterValue(DbType dbType, object value)
        {
            if (value == null || value == DBNull.Value)
                return value;

            if ((dbType == DbType.Byte || dbType == DbType.Int16) && value is not short)
                return Convert.ToInt16(value);

            return value;
        }

        /// <summary>
        /// Creates a parameter with NpgsqlDbType for PostgreSQL-specific types.
        /// </summary>
        public DbParameter CreateParameter(string name, NpgsqlTypes.NpgsqlDbType dbType, object value) =>
            new NpgsqlParameter
            {
                ParameterName = name,
                NpgsqlDbType = dbType,
                Value = value ?? DBNull.Value
            };

        /// <summary>
        /// Creates a parameter with NpgsqlDbType and explicit size.
        /// </summary>
        public DbParameter CreateParameter(string name, NpgsqlTypes.NpgsqlDbType dbType, int size, object value) =>
            new NpgsqlParameter
            {
                ParameterName = name,
                NpgsqlDbType = dbType,
                Size = size,
                Value = value ?? DBNull.Value
            };

        public override TransactionWrapper WrapTransaction(DbTransaction underlyingTransaction,
            CancellationToken cancellationToken, TimeSpan timeout = default) =>
            new TransactionWrapper(underlyingTransaction, DataOptions, timeout, cancellationToken);

        public string GetConnectionString() => _connectionString;
    }
}

// ============================================================================
// File: src/ContentRepository.PostgreSql/Data/PgSqlDataInstaller.cs
// ============================================================================

using System;
using System.Collections.Generic;
using System.Data;
using System.Linq;
using System.Threading;
using STT=System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using Npgsql;
using NpgsqlTypes;
using SenseNet.Configuration;
using SenseNet.ContentRepository.Storage.DataModel;
using SenseNet.ContentRepository.Storage.Schema;

// ReSharper disable once CheckNamespace
namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient
{
    public class PgSqlDataInstaller : IDataInstaller
    {
        private static readonly byte Yes = 1;
        private static readonly byte No = 0;

        private ILogger _logger;
        private ConnectionStringOptions ConnectionStrings { get; }

        public PgSqlDataInstaller(IOptions<ConnectionStringOptions> connectionOptions,
            ILogger
logger) + { + ConnectionStrings = connectionOptions?.Value ?? new ConnectionStringOptions(); + _logger = logger; + } + + public async STT.Task InstallInitialDataAsync(InitialData data, DataProvider dataProvider, CancellationToken cancel) + { + if (dataProvider is not PgSqlDataProvider pgdp) + throw new InvalidOperationException("PgSqlDataInstaller error: data provider is expected to be PgSqlDataProvider."); + + var connectionString = ConnectionStrings.Repository; + + // Install schema types first + await BulkInsertPropertyTypesAsync(data.Schema.PropertyTypes, connectionString, cancel, pgdp).ConfigureAwait(false); + await BulkInsertNodeTypesAsync(data.Schema.NodeTypes, connectionString, cancel, pgdp).ConfigureAwait(false); + + // Install nodes + var now = DateTime.UtcNow; + foreach (var node in data.Nodes) + { + node.CreationDate = now; + node.ModificationDate = now; + } + await BulkInsertNodesAsync(data.Nodes.ToList(), connectionString, cancel, pgdp).ConfigureAwait(false); + + // Install versions and related data + var longTextId = 0; + var refPropId = 0; + var allLongTexts = new List<(int Id, int VersionId, int PropertyTypeId, string Value)>(); + var allRefProps = new List<(int Id, int VersionId, int PropertyTypeId, int ReferredNodeId)>(); + var allBinaryProps = new List<(int BinaryPropertyId, int VersionId, int PropertyTypeId, int FileId)>(); + var allFiles = new List<(int FileId, string ContentType, string FileNameWithoutExtension, string Extension, long Size, byte[] Stream, string BlobProvider, string BlobProviderData)>(); + var allVersions = new List<(VersionData Version, IDictionary DynamicProperties)>(); + + foreach (var version in data.Versions) + { + version.CreationDate = now; + version.ModificationDate = now; + + var props = data.DynamicProperties.FirstOrDefault(x => x.VersionId == version.VersionId); + allVersions.Add((version, props?.DynamicProperties)); + + if (props?.LongTextProperties != null) + { + foreach (var longTextData in 
props.LongTextProperties) + { + var propertyTypeId = data.Schema.PropertyTypes.FirstOrDefault(x => x.Name == longTextData.Key.Name)?.Id ?? 0; + allLongTexts.Add((++longTextId, version.VersionId, propertyTypeId, longTextData.Value)); + } + } + if (props?.ReferenceProperties != null) + { + foreach (var referenceData in props.ReferenceProperties) + { + var propertyTypeId = data.Schema.PropertyTypes.FirstOrDefault(x => x.Name == referenceData.Key.Name)?.Id ?? 0; + foreach (var value in referenceData.Value) + { + allRefProps.Add((++refPropId, version.VersionId, propertyTypeId, value)); + } + } + } + if (props?.BinaryProperties != null) + { + foreach (var binaryPropertyData in props.BinaryProperties) + { + var propertyTypeId = data.Schema.PropertyTypes.FirstOrDefault(x => x.Name == binaryPropertyData.Key.Name)?.Id ?? 0; + allBinaryProps.Add((binaryPropertyData.Value.Id, version.VersionId, propertyTypeId, binaryPropertyData.Value.FileId)); + + byte[] buffer = null; + var providerName = binaryPropertyData.Value.BlobProviderName; + var providerData = binaryPropertyData.Value.BlobProviderData; + if (providerName == null && providerData != null && + providerData.StartsWith("/Root", StringComparison.OrdinalIgnoreCase)) + { + buffer = data.GetBlobBytes(providerData, binaryPropertyData.Key.Name); + providerData = null; + } + + allFiles.Add(( + binaryPropertyData.Value.FileId, + binaryPropertyData.Value.ContentType, + binaryPropertyData.Value.FileName.FileNameWithoutExtension, + binaryPropertyData.Value.FileName.Extension, + buffer?.Length ?? 
binaryPropertyData.Value.Size, + buffer, + providerName, + providerData)); + } + } + } + + await BulkInsertVersionsAsync(allVersions, connectionString, cancel, pgdp).ConfigureAwait(false); + await BulkInsertLongTextPropertiesAsync(allLongTexts, connectionString, cancel).ConfigureAwait(false); + await BulkInsertReferencePropertiesAsync(allRefProps, connectionString, cancel).ConfigureAwait(false); + await BulkInsertFilesAsync(allFiles, connectionString, cancel).ConfigureAwait(false); + await BulkInsertBinaryPropertiesAsync(allBinaryProps, connectionString, cancel).ConfigureAwait(false); + + // Re-enable foreign key triggers + await ReEnableTriggersAsync(connectionString, cancel).ConfigureAwait(false); + + // Resync all SERIAL sequences to max(id) after bulk inserts with explicit IDs + await ResyncSequencesAsync(connectionString, cancel).ConfigureAwait(false); + } + + /// + /// Re-enables triggers that were disabled before data loading. + /// + private async STT.Task ReEnableTriggersAsync(string connectionString, CancellationToken cancel) + { + _logger.LogTrace("Re-enabling triggers after bulk insert."); + await using var connection = new NpgsqlConnection(connectionString); + await connection.OpenAsync(cancel).ConfigureAwait(false); + + var sql = @" +ALTER TABLE ""BinaryProperties"" ENABLE TRIGGER ALL; +ALTER TABLE ""Nodes"" ENABLE TRIGGER ALL; +ALTER TABLE ""ReferenceProperties"" ENABLE TRIGGER ALL; +ALTER TABLE ""LongTextProperties"" ENABLE TRIGGER ALL; +ALTER TABLE ""Versions"" ENABLE TRIGGER ALL; +"; + await using var cmd = new NpgsqlCommand(sql, connection); + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + } + + /// + /// After bulk inserts with explicit ID values, PostgreSQL SERIAL sequences are out of sync. + /// This resets each sequence to MAX(id)+1 so subsequent inserts get correct IDs. 
+ /// + private async STT.Task ResyncSequencesAsync(string connectionString, CancellationToken cancel) + { + _logger.LogTrace("Resyncing SERIAL sequences after bulk insert."); + await using var connection = new NpgsqlConnection(connectionString); + await connection.OpenAsync(cancel).ConfigureAwait(false); + + // Each entry: (table, id_column) + var tables = new[] + { + ("NodeTypes", "NodeTypeId"), + ("PropertyTypes", "PropertyTypeId"), + ("Nodes", "NodeId"), + ("Versions", "VersionId"), + ("Files", "FileId"), + ("BinaryProperties", "BinaryPropertyId"), + ("ReferenceProperties", "ReferencePropertyId"), + ("LongTextProperties", "LongTextPropertyId"), + }; + + foreach (var (table, idCol) in tables) + { + // pg_get_serial_sequence returns the sequence name for a SERIAL column + var sql = $@"SELECT setval(pg_get_serial_sequence('""{table}""', '{idCol}'), + COALESCE((SELECT MAX(""{idCol}"") FROM ""{table}""), 0) + 1, false);"; + await using var cmd = new NpgsqlCommand(sql, connection); + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + } + } + + /* ==================================================================================================== Bulk insert methods */ + + private async STT.Task BulkInsertPropertyTypesAsync(List propertyTypes, string connectionString, + CancellationToken cancel, PgSqlDataProvider dataProvider) + { + _logger.LogTrace("BulkInsert: deleting from table PropertyTypes"); + await using var connection = new NpgsqlConnection(connectionString); + await connection.OpenAsync(cancel).ConfigureAwait(false); + + await using (var cmd = new NpgsqlCommand(@"DELETE FROM ""PropertyTypes""", connection)) + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + + foreach (var pt in propertyTypes) + { + await using var cmd = new NpgsqlCommand( + @"INSERT INTO ""PropertyTypes"" (""PropertyTypeId"", ""Name"", ""DataType"", ""Mapping"", ""IsContentListProperty"") + VALUES (@Id, @Name, @DataType, @Mapping, @IsContentListProperty)", 
connection); + cmd.Parameters.AddWithValue("@Id", pt.Id); + cmd.Parameters.AddWithValue("@Name", pt.Name); + cmd.Parameters.AddWithValue("@DataType", pt.DataType.ToString()); + cmd.Parameters.AddWithValue("@Mapping", pt.Mapping); + cmd.Parameters.AddWithValue("@IsContentListProperty", pt.IsContentListProperty ? Yes : No); + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + } + + _logger.LogTrace($"BulkInsert: inserted {propertyTypes.Count} records into table PropertyTypes."); + } + + private async STT.Task BulkInsertNodeTypesAsync(List nodeTypes, string connectionString, + CancellationToken cancel, PgSqlDataProvider dataProvider) + { + _logger.LogTrace("BulkInsert: deleting from table NodeTypes"); + await using var connection = new NpgsqlConnection(connectionString); + await connection.OpenAsync(cancel).ConfigureAwait(false); + + await using (var cmd = new NpgsqlCommand(@"DELETE FROM ""NodeTypes""", connection)) + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + + foreach (var nt in nodeTypes) + { + await using var cmd = new NpgsqlCommand( + @"INSERT INTO ""NodeTypes"" (""NodeTypeId"", ""ParentId"", ""Name"", ""ClassName"", ""Properties"") + VALUES (@Id, @ParentId, @Name, @ClassName, @Properties)", connection); + cmd.Parameters.AddWithValue("@Id", nt.Id); + var parentId = nodeTypes.FirstOrDefault(x => x.Name == nt.ParentName)?.Id; + cmd.Parameters.AddWithValue("@ParentId", (object)parentId ?? 
DBNull.Value); + cmd.Parameters.AddWithValue("@Name", nt.Name); + cmd.Parameters.AddWithValue("@ClassName", nt.ClassName); + cmd.Parameters.AddWithValue("@Properties", string.Join(" ", nt.Properties)); + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + } + + _logger.LogTrace($"BulkInsert: inserted {nodeTypes.Count} records into table NodeTypes."); + } + + private async STT.Task BulkInsertNodesAsync(List nodes, string connectionString, + CancellationToken cancel, PgSqlDataProvider dataProvider) + { + _logger.LogTrace("BulkInsert: deleting from table Nodes"); + await using var connection = new NpgsqlConnection(connectionString); + await connection.OpenAsync(cancel).ConfigureAwait(false); + + await using (var cmd = new NpgsqlCommand(@"DELETE FROM ""Nodes""", connection)) + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + + foreach (var node in nodes) + { + await using var cmd = new NpgsqlCommand( + @"INSERT INTO ""Nodes"" (""NodeId"", ""NodeTypeId"", ""CreatingInProgress"", ""IsDeleted"", ""IsInherited"", + ""ParentNodeId"", ""Name"", ""Path"", ""Index"", ""Locked"", ""ETag"", ""LockType"", ""LockTimeout"", + ""LockDate"", ""LockToken"", ""LastLockUpdate"", ""LastMinorVersionId"", ""LastMajorVersionId"", + ""CreationDate"", ""CreatedById"", ""ModificationDate"", ""ModifiedById"", ""IsSystem"", ""OwnerId"", ""SavingState"") + VALUES (@NodeId, @NodeTypeId, @CreatingInProgress, @IsDeleted, @IsInherited, + @ParentNodeId, @Name, @Path, @Index, @Locked, @ETag, @LockType, @LockTimeout, + @LockDate, @LockToken, @LastLockUpdate, @LastMinorVersionId, @LastMajorVersionId, + @CreationDate, @CreatedById, @ModificationDate, @ModifiedById, @IsSystem, @OwnerId, @SavingState)", connection); + cmd.Parameters.AddWithValue("@NodeId", node.NodeId); + cmd.Parameters.AddWithValue("@NodeTypeId", node.NodeTypeId); + cmd.Parameters.AddWithValue("@CreatingInProgress", node.CreatingInProgress ? Yes : No); + cmd.Parameters.AddWithValue("@IsDeleted", node.IsDeleted ? 
Yes : No); + cmd.Parameters.AddWithValue("@IsInherited", (byte)0); + cmd.Parameters.AddWithValue("@ParentNodeId", node.ParentNodeId); + cmd.Parameters.AddWithValue("@Name", node.Name); + cmd.Parameters.AddWithValue("@Path", node.Path); + cmd.Parameters.AddWithValue("@Index", node.Index); + cmd.Parameters.AddWithValue("@Locked", node.Locked ? Yes : No); + cmd.Parameters.AddWithValue("@ETag", (object)(node.ETag ?? string.Empty)); + cmd.Parameters.AddWithValue("@LockType", node.LockType); + cmd.Parameters.AddWithValue("@LockTimeout", node.LockTimeout); + cmd.Parameters.AddWithValue("@LockDate", AlignDateTime(node.LockDate, dataProvider)); + cmd.Parameters.AddWithValue("@LockToken", (object)(node.LockToken ?? string.Empty)); + cmd.Parameters.AddWithValue("@LastLockUpdate", AlignDateTime(node.LastLockUpdate, dataProvider)); + cmd.Parameters.AddWithValue("@LastMinorVersionId", node.LastMinorVersionId); + cmd.Parameters.AddWithValue("@LastMajorVersionId", node.LastMajorVersionId); + cmd.Parameters.AddWithValue("@CreationDate", AlignDateTime(node.CreationDate, dataProvider)); + cmd.Parameters.AddWithValue("@CreatedById", node.CreatedById); + cmd.Parameters.AddWithValue("@ModificationDate", AlignDateTime(node.ModificationDate, dataProvider)); + cmd.Parameters.AddWithValue("@ModifiedById", node.ModifiedById); + cmd.Parameters.AddWithValue("@IsSystem", node.IsSystem ? 
Yes : No); + cmd.Parameters.AddWithValue("@OwnerId", node.OwnerId); + cmd.Parameters.AddWithValue("@SavingState", (int)node.SavingState); + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + } + + _logger.LogTrace($"BulkInsert: inserted {nodes.Count} records into table Nodes."); + } + + private async STT.Task BulkInsertVersionsAsync( + List<(VersionData Version, IDictionary DynamicProperties)> versions, + string connectionString, CancellationToken cancel, PgSqlDataProvider dataProvider) + { + _logger.LogTrace("BulkInsert: deleting from table Versions"); + await using var connection = new NpgsqlConnection(connectionString); + await connection.OpenAsync(cancel).ConfigureAwait(false); + + await using (var cmd = new NpgsqlCommand(@"DELETE FROM ""Versions""", connection)) + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + + foreach (var (version, dynamicProperties) in versions) + { + await using var cmd = new NpgsqlCommand( + @"INSERT INTO ""Versions"" (""VersionId"", ""NodeId"", ""MajorNumber"", ""MinorNumber"", ""Status"", + ""CreationDate"", ""CreatedById"", ""ModificationDate"", ""ModifiedById"", + ""IndexDocument"", ""ChangedData"", ""DynamicProperties"") + VALUES (@VersionId, @NodeId, @MajorNumber, @MinorNumber, @Status, + @CreationDate, @CreatedById, @ModificationDate, @ModifiedById, + @IndexDocument, @ChangedData, @DynamicProperties)", connection); + cmd.Parameters.AddWithValue("@VersionId", version.VersionId); + cmd.Parameters.AddWithValue("@NodeId", version.NodeId); + cmd.Parameters.AddWithValue("@MajorNumber", (short)version.Version.Major); + cmd.Parameters.AddWithValue("@MinorNumber", (short)version.Version.Minor); + cmd.Parameters.AddWithValue("@Status", (short)version.Version.Status); + cmd.Parameters.AddWithValue("@CreationDate", AlignDateTime(version.CreationDate, dataProvider)); + cmd.Parameters.AddWithValue("@CreatedById", version.CreatedById); + cmd.Parameters.AddWithValue("@ModificationDate", 
AlignDateTime(version.ModificationDate, dataProvider)); + cmd.Parameters.AddWithValue("@ModifiedById", version.ModifiedById); + cmd.Parameters.AddWithValue("@IndexDocument", DBNull.Value); + cmd.Parameters.AddWithValue("@ChangedData", DBNull.Value); + var dynPropValue = dynamicProperties == null ? null : dataProvider.SerializeDynamicProperties(dynamicProperties); + cmd.Parameters.AddWithValue("@DynamicProperties", (object)dynPropValue ?? DBNull.Value); + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + } + + _logger.LogTrace($"BulkInsert: inserted {versions.Count} records into table Versions."); + } + + private async STT.Task BulkInsertLongTextPropertiesAsync( + List<(int Id, int VersionId, int PropertyTypeId, string Value)> longTexts, + string connectionString, CancellationToken cancel) + { + _logger.LogTrace("BulkInsert: deleting from table LongTextProperties"); + await using var connection = new NpgsqlConnection(connectionString); + await connection.OpenAsync(cancel).ConfigureAwait(false); + + await using (var cmd = new NpgsqlCommand(@"DELETE FROM ""LongTextProperties""", connection)) + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + + foreach (var lt in longTexts) + { + await using var cmd = new NpgsqlCommand( + @"INSERT INTO ""LongTextProperties"" (""LongTextPropertyId"", ""VersionId"", ""PropertyTypeId"", ""Length"", ""Value"") + VALUES (@Id, @VersionId, @PropertyTypeId, @Length, @Value)", connection); + cmd.Parameters.AddWithValue("@Id", lt.Id); + cmd.Parameters.AddWithValue("@VersionId", lt.VersionId); + cmd.Parameters.AddWithValue("@PropertyTypeId", lt.PropertyTypeId); + cmd.Parameters.AddWithValue("@Length", (object)lt.Value?.Length ?? DBNull.Value); + cmd.Parameters.AddWithValue("@Value", (object)lt.Value ?? 
DBNull.Value); + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + } + + _logger.LogTrace($"BulkInsert: inserted {longTexts.Count} records into table LongTextProperties."); + } + + private async STT.Task BulkInsertReferencePropertiesAsync( + List<(int Id, int VersionId, int PropertyTypeId, int ReferredNodeId)> refProps, + string connectionString, CancellationToken cancel) + { + _logger.LogTrace("BulkInsert: deleting from table ReferenceProperties"); + await using var connection = new NpgsqlConnection(connectionString); + await connection.OpenAsync(cancel).ConfigureAwait(false); + + await using (var cmd = new NpgsqlCommand(@"DELETE FROM ""ReferenceProperties""", connection)) + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + + foreach (var rp in refProps) + { + await using var cmd = new NpgsqlCommand( + @"INSERT INTO ""ReferenceProperties"" (""ReferencePropertyId"", ""VersionId"", ""PropertyTypeId"", ""ReferredNodeId"") + VALUES (@Id, @VersionId, @PropertyTypeId, @ReferredNodeId)", connection); + cmd.Parameters.AddWithValue("@Id", rp.Id); + cmd.Parameters.AddWithValue("@VersionId", rp.VersionId); + cmd.Parameters.AddWithValue("@PropertyTypeId", rp.PropertyTypeId); + cmd.Parameters.AddWithValue("@ReferredNodeId", rp.ReferredNodeId); + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + } + + _logger.LogTrace($"BulkInsert: inserted {refProps.Count} records into table ReferenceProperties."); + } + + private async STT.Task BulkInsertFilesAsync( + List<(int FileId, string ContentType, string FileNameWithoutExtension, string Extension, long Size, byte[] Stream, string BlobProvider, string BlobProviderData)> files, + string connectionString, CancellationToken cancel) + { + _logger.LogTrace("BulkInsert: deleting from table Files"); + await using var connection = new NpgsqlConnection(connectionString); + await connection.OpenAsync(cancel).ConfigureAwait(false); + + await using (var cmd = new NpgsqlCommand(@"DELETE FROM ""Files""", 
connection)) + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + + foreach (var file in files) + { + await using var cmd = new NpgsqlCommand( + @"INSERT INTO ""Files"" (""FileId"", ""ContentType"", ""FileNameWithoutExtension"", ""Extension"", + ""Size"", ""Stream"", ""CreationDate"", ""BlobProvider"", ""BlobProviderData"") + VALUES (@FileId, @ContentType, @FileNameWithoutExtension, @Extension, + @Size, @Stream, @CreationDate, @BlobProvider, @BlobProviderData)", connection); + cmd.Parameters.AddWithValue("@FileId", file.FileId); + cmd.Parameters.AddWithValue("@ContentType", file.ContentType); + cmd.Parameters.AddWithValue("@FileNameWithoutExtension", (object)file.FileNameWithoutExtension ?? DBNull.Value); + cmd.Parameters.AddWithValue("@Extension", file.Extension); + cmd.Parameters.AddWithValue("@Size", file.Size); + cmd.Parameters.Add(new NpgsqlParameter("@Stream", NpgsqlDbType.Bytea) { Value = (object)file.Stream ?? DBNull.Value }); + cmd.Parameters.AddWithValue("@CreationDate", DateTime.UtcNow); + cmd.Parameters.AddWithValue("@BlobProvider", (object)file.BlobProvider ?? DBNull.Value); + cmd.Parameters.AddWithValue("@BlobProviderData", (object)file.BlobProviderData ?? 
DBNull.Value); + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + } + + _logger.LogTrace($"BulkInsert: inserted {files.Count} records into table Files."); + } + + private async STT.Task BulkInsertBinaryPropertiesAsync( + List<(int BinaryPropertyId, int VersionId, int PropertyTypeId, int FileId)> binaryProps, + string connectionString, CancellationToken cancel) + { + _logger.LogTrace("BulkInsert: deleting from table BinaryProperties"); + await using var connection = new NpgsqlConnection(connectionString); + await connection.OpenAsync(cancel).ConfigureAwait(false); + + await using (var cmd = new NpgsqlCommand(@"DELETE FROM ""BinaryProperties""", connection)) + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + + foreach (var bp in binaryProps) + { + await using var cmd = new NpgsqlCommand( + @"INSERT INTO ""BinaryProperties"" (""BinaryPropertyId"", ""VersionId"", ""PropertyTypeId"", ""FileId"") + VALUES (@BinaryPropertyId, @VersionId, @PropertyTypeId, @FileId)", connection); + cmd.Parameters.AddWithValue("@BinaryPropertyId", bp.BinaryPropertyId); + cmd.Parameters.AddWithValue("@VersionId", bp.VersionId); + cmd.Parameters.AddWithValue("@PropertyTypeId", bp.PropertyTypeId); + cmd.Parameters.AddWithValue("@FileId", bp.FileId); + await cmd.ExecuteNonQueryAsync(cancel).ConfigureAwait(false); + } + + _logger.LogTrace($"BulkInsert: inserted {binaryProps.Count} records into table BinaryProperties."); + } + + /* ==================================================================================================== Tools */ + + private static DateTime AlignDateTime(DateTime dateTime, PgSqlDataProvider dataProvider) + { + if (dateTime > dataProvider.DateTimeMaxValue) + dateTime = dataProvider.DateTimeMaxValue; + if (dateTime < dataProvider.DateTimeMinValue) + dateTime = dataProvider.DateTimeMinValue; + return dateTime; + } + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlDataProvider.cs 
b/src/ContentRepository.PostgreSql/Data/PgSqlDataProvider.cs new file mode 100644 index 000000000..353e962f8 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlDataProvider.cs @@ -0,0 +1,534 @@ +using System; +using System.Collections.Generic; +using System.Data; +using System.Data.Common; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using Npgsql; +using SenseNet.Configuration; +using SenseNet.ContentRepository.Search.Querying; +using SenseNet.ContentRepository.Storage.Data.PgSqlClient; +using SenseNet.ContentRepository.Storage.DataModel; +using SenseNet.ContentRepository.Storage.Schema; +using SenseNet.Diagnostics; +using SenseNet.Storage.Data.PgSqlClient; +using SenseNet.Tools; + +// ReSharper disable once CheckNamespace +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient +{ + /// + /// PostgreSQL implementation of the relational data provider. + /// + public partial class PgSqlDataProvider : RelationalDataProviderBase + { + private readonly IOptions _connectionOptions; + private readonly DataOptions _dataOptions; + private readonly PgSqlDatabaseInstallationOptions _dbInstallerOptions; + private readonly PgSqlDatabaseInstaller _databaseInstaller; + private readonly IDataInstaller _dataInstaller; + private readonly ILogger _logger; + private readonly IRetrier _retrier; + + public PgSqlDataProvider( + IOptions dataOptions, + IOptions connectionOptions, + IOptions dbInstallerOptions, + PgSqlDatabaseInstaller databaseInstaller, + IDataInstaller dataInstaller, + ILogger logger, + IRetrier retrier) + { + _connectionOptions = connectionOptions; + _dataOptions = dataOptions?.Value; + _dbInstallerOptions = dbInstallerOptions?.Value ?? new PgSqlDatabaseInstallationOptions(); + _databaseInstaller = databaseInstaller; + _dataInstaller = dataInstaller ?? 
throw new ArgumentNullException(nameof(dataInstaller)); + _logger = logger; + _retrier = retrier; + } + + public override SnDataContext CreateDataContext(CancellationToken cancellationToken) + { + return new PgSqlDataContext(_connectionOptions.Value.Repository, _dataOptions, _retrier, cancellationToken); + } + + public override SchemaWriter CreateSchemaWriter() + { + return new PgSqlSchemaWriter(_connectionOptions); + } + + // =============================================================================================== Queries + + public override async Task> QueryNodesByTypeAndPathAndNameAsync( + int[] nodeTypeIds, string[] pathStart, bool orderByPath, string name, + CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation(() => "PgSqlDataProvider: " + + $"QueryNodesByTypeAndPathAndName(nodeTypeIds: {nodeTypeIds?.ToTrace() ?? "null"}, " + + $"pathStart: {pathStart?.ToTrace() ?? "null"}, orderByPath: {orderByPath}, name: {name})"); + + var sql = new System.Text.StringBuilder( + "SELECT N.\"NodeId\" FROM \"Nodes\" N"); + + if (nodeTypeIds != null && nodeTypeIds.Length > 0) + { + sql.Append(" JOIN \"NodeTypes\" T ON N.\"NodeTypeId\" = T.\"NodeTypeId\" WHERE T.\"NodeTypeId\" IN ("); + sql.Append(string.Join(", ", Enumerable.Range(0, nodeTypeIds.Length).Select(i => "@TypeId" + i))); + sql.Append(")"); + } + else + { + sql.Append(" WHERE 1=1"); + } + + if (pathStart != null && pathStart.Length > 0) + { + sql.Append(" AND ("); + for (var i = 0; i < pathStart.Length; i++) + { + if (i > 0) sql.Append(" OR "); + sql.Append($"N.\"Path\" LIKE @Path{i} || '/%' OR N.\"Path\" = @Path{i}"); + } + sql.Append(")"); + } + + if (name != null) + sql.Append(" AND N.\"Name\" = @Name"); + + if (orderByPath) + sql.Append(" ORDER BY N.\"Path\""); + + using var ctx = CreateDataContext(cancellationToken); + var heads = await ctx.ExecuteReaderAsync(sql.ToString(), cmd => + { + if (nodeTypeIds != null) + for (var i = 0; i < nodeTypeIds.Length; i++) + 
cmd.Parameters.Add(ctx.CreateParameter("@TypeId" + i, DbType.Int32, nodeTypeIds[i])); + if (pathStart != null) + for (var i = 0; i < pathStart.Length; i++) + cmd.Parameters.Add(ctx.CreateParameter("@Path" + i, DbType.String, pathStart[i].TrimEnd('/'))); + if (name != null) + cmd.Parameters.Add(ctx.CreateParameter("@Name", DbType.String, 450, name)); + }, async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + var result = new List(); + while (await reader.ReadAsync(cancel).ConfigureAwait(false)) + result.Add(reader.GetInt32(0)); + return (IEnumerable)result; + }).ConfigureAwait(false); + + op.Successful = true; + return heads; + } + + public override async Task> QueryNodesByTypeAndPathAndPropertyAsync( + int[] nodeTypeIds, string pathStart, bool orderByPath, + List properties, CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation(() => "PgSqlDataProvider: " + + $"QueryNodesByTypeAndPathAndProperty(nodeTypeIds: {nodeTypeIds.ToTrace()}, " + + $"pathStart: {pathStart}, orderByPath: {orderByPath}, properties: {properties.Count})"); + + var sql = new System.Text.StringBuilder( + "SELECT N.\"NodeId\" FROM \"Nodes\" N JOIN \"NodeTypes\" T ON N.\"NodeTypeId\" = T.\"NodeTypeId\"" + + " JOIN \"Versions\" V ON N.\"NodeId\" = V.\"NodeId\" AND V.\"VersionId\" = N.\"LastMinorVersionId\"" + + " WHERE T.\"NodeTypeId\" IN ("); + sql.Append(string.Join(", ", Enumerable.Range(0, nodeTypeIds.Length).Select(i => "@TypeId" + i))); + sql.Append(")"); + + if (!string.IsNullOrEmpty(pathStart)) + sql.Append(" AND (N.\"Path\" LIKE @Path || '/%' OR N.\"Path\" = @Path)"); + + var paramIndex = 0; + foreach (var prop in properties) + { + if (prop.QueryOperator == Operator.Equal) + { + sql.Append($" AND V.\"DynamicProperties\" LIKE '%' || @PropName{paramIndex} || ':' || @PropValue{paramIndex} || E'\\r\\n%'"); + } + paramIndex++; + } + + if (orderByPath) + sql.Append(" ORDER BY N.\"Path\""); + + using var ctx = 
CreateDataContext(cancellationToken); + var heads = await ctx.ExecuteReaderAsync(sql.ToString(), cmd => + { + for (var i = 0; i < nodeTypeIds.Length; i++) + cmd.Parameters.Add(ctx.CreateParameter("@TypeId" + i, DbType.Int32, nodeTypeIds[i])); + if (!string.IsNullOrEmpty(pathStart)) + cmd.Parameters.Add(ctx.CreateParameter("@Path", DbType.String, 450, pathStart.TrimEnd('/'))); + paramIndex = 0; + foreach (var prop in properties) + { + cmd.Parameters.Add(ctx.CreateParameter("@PropName" + paramIndex, DbType.String, 450, prop.PropertyName)); + cmd.Parameters.Add(ctx.CreateParameter("@PropValue" + paramIndex, DbType.String, 450, + prop.Value?.ToString() ?? string.Empty)); + paramIndex++; + } + }, async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + var result = new List(); + while (await reader.ReadAsync(cancel).ConfigureAwait(false)) + result.Add(reader.GetInt32(0)); + return (IEnumerable)result; + }).ConfigureAwait(false); + + op.Successful = true; + return heads; + } + + // =============================================================================================== Exception handling + + protected override Exception GetException(Exception e, string msg = null) + { + if (e is ContentNotFoundException) + return e; + + if (e is NodeIsOutOfDateException) + return e; + + if (e is NodeAlreadyExistsException) + return e; + + if (e is NpgsqlException npgEx) + { + if (msg == null) + msg = "A database exception occured during execution of the operation." 
+ + " See InnerException for details."; + + // 23505 = unique_violation + if (npgEx.SqlState == "23505") + return new NodeAlreadyExistsException(msg, npgEx); + } + + return base.GetException(e, msg); + } + + public override bool IsDeadlockException(Exception e) + { + // 40P01 = deadlock_detected + return e is NpgsqlException npgEx && npgEx.SqlState == "40P01"; + } + + // =============================================================================================== Installation + + public override async System.Threading.Tasks.Task InstallInitialDataAsync(InitialData initialData, CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlDataProvider: InstallInitialData."); + + await _dataInstaller.InstallInitialDataAsync(initialData, this, cancellationToken).ConfigureAwait(false); + + op.Successful = true; + } + + public override async System.Threading.Tasks.Task InstallDatabaseAsync(CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlDataProvider: InstallDatabase."); + + if (!_dbInstallerOptions.EnableFirstInstallDB) + { + _logger.LogTrace("EnableFirstInstallDB is disabled. Skipping database installation."); + op.Successful = true; + return; + } + + if (await IsDatabaseAlreadyInstalledAsync(cancellationToken).ConfigureAwait(false)) + { + _logger.LogWarning("Database already contains data. 
Skipping installation to prevent data loss."); + op.Successful = true; + return; + } + + if (!string.IsNullOrEmpty(_dbInstallerOptions.DatabaseName)) + { + _logger.LogTrace($"Executing installer for database {_dbInstallerOptions.DatabaseName}."); + + await _databaseInstaller.InstallAsync().ConfigureAwait(false); + + // warmup: we have to wait a short period before the new db becomes usable + await Tools.Retrier.RetryAsync(15, 2000, async () => + { + _logger.LogTrace("Trying to connect to the new database..."); + + using var ctx = CreateDataContext(cancellationToken); + await ctx.ExecuteNonQueryAsync("SELECT 1 FROM pg_catalog.pg_tables LIMIT 1").ConfigureAwait(false); + }, (i, ex) => + { + if (ex == null) + { + _logger.LogTrace("Successfully connected to the newly created database."); + return true; + } + + if (i == 1) + _logger.LogError($"Could not connect to the database {_dbInstallerOptions.DatabaseName} after several retries."); + + return false; + }, cancellationToken); + } + else + { + _logger.LogTrace("Install database name is not configured, moving on to schema installation."); + } + + _logger.LogTrace("Executing security schema script."); + await ExecuteEmbeddedNonQueryScriptAsync( + "SenseNet.ContentRepository.Scripts.PgSqlInstall_Security.sql", cancellationToken) + .ConfigureAwait(false); + + _logger.LogTrace("Executing database schema script."); + await ExecuteEmbeddedNonQueryScriptAsync( + "SenseNet.ContentRepository.Scripts.PgSqlInstall_Schema.sql", cancellationToken) + .ConfigureAwait(false); + + op.Successful = true; + } + + private IBlobStorage BlobStorage => Providers.Instance.BlobStorage; + + private async System.Threading.Tasks.Task IsDatabaseAlreadyInstalledAsync(CancellationToken cancellationToken) + { + try + { + using var ctx = CreateDataContext(cancellationToken); + var result = await ctx.ExecuteScalarAsync(@" + SELECT CASE WHEN EXISTS ( + SELECT 1 FROM information_schema.tables WHERE table_name = 'Nodes' + ) THEN (SELECT COUNT(1) FROM 
""Nodes"") ELSE 0 END").ConfigureAwait(false); + + var count = Convert.ToInt32(result); + if (count > 0) + _logger.LogTrace($"IsDatabaseAlreadyInstalledAsync: Nodes table contains {count} rows, database is already installed."); + + return count > 0; + } + catch (Exception ex) + { + _logger.LogWarning(ex, "IsDatabaseAlreadyInstalledAsync: Could not determine database state. Assuming not installed."); + return false; + } + } + + public override async Task IsDatabaseReadyAsync(CancellationToken cancellationToken) + { + try + { + using var ctx = CreateDataContext(cancellationToken); + var result = await ctx.ExecuteScalarAsync(@" + SELECT CASE WHEN EXISTS ( + SELECT 1 FROM information_schema.tables + WHERE table_schema = 'public' AND table_name = 'Nodes' + ) THEN true ELSE false END").ConfigureAwait(false); + return Convert.ToBoolean(result); + } + catch (NpgsqlException ex) when ( + ex.SqlState == "3D000" /* invalid_catalog_name (database doesn't exist) */ || + ex.SqlState == "08006" /* connection_failure */ || + ex.SqlState == "08001" /* sqlclient_unable_to_establish_sqlconnection */) + { + return false; + } + catch (Exception) + { + return false; + } + } + + // =============================================================================================== Timestamp + + protected override long ConvertTimestampToInt64(object timestamp) + { + if (timestamp == null || timestamp == DBNull.Value) + return 0L; + if (timestamp is long l) + return l; + if (timestamp is int i) + return i; + if (timestamp is byte[] bytes) + { + // byte[] may come from GetSafeByteArray which converts long via HostToNetworkOrder + if (bytes.Length == 8) + return System.Net.IPAddress.NetworkToHostOrder(BitConverter.ToInt64(bytes, 0)); + // Fallback: manual big-endian conversion (same as MSSQL) + var result = 0L; + for (int j = 0; j < bytes.Length; j++) + result = (result << 8) + bytes[j]; + return result; + } + return Convert.ToInt64(timestamp); + } + + protected override object 
ConvertInt64ToTimestamp(long timestamp)
+        {
+            // PostgreSQL stores the timestamp as a plain bigint, so no conversion is needed.
+            return timestamp;
+        }
+
+        // =============================================================================================== GetAppModelScript
+
+        /// <summary>
+        /// Builds the application-model resolver query for the given paths.
+        /// FIX: parameter type restored to IEnumerable&lt;string&gt; (the generic argument was
+        /// lost; the path.Replace calls below require string elements).
+        /// </summary>
+        /// <param name="paths">Application model paths; their order is preserved via the Id column.</param>
+        /// <param name="resolveAll">True: return hits for every path; false: only the first hit.</param>
+        /// <param name="resolveChildren">True: return the children of the matched nodes.</param>
+        protected override string GetAppModelScript(IEnumerable<string> paths, bool resolveAll, bool resolveChildren)
+        {
+            // Build "SELECT i AS Id, 'path' AS Path" rows for the CTE.
+            var sb = new System.Text.StringBuilder();
+            var index = 0;
+            foreach (var path in paths)
+            {
+                if (index > 0) sb.Append(" UNION ALL ");
+                // Escape single quotes and neutralize SQL comment markers to keep the
+                // inlined literals injection-safe.
+                sb.AppendFormat("SELECT {0} AS \"Id\", '{1}' AS \"Path\"", ++index,
+                    path.Replace("'", "''").Replace("/*", "**").Replace("--", "**"));
+            }
+
+            var pathsCte = sb.ToString();
+
+            if (resolveAll)
+            {
+                if (resolveChildren)
+                {
+                    // Return NodeId and Path of child nodes
+                    return $@"WITH _paths AS ({pathsCte})
+SELECT N.""NodeId"", N.""Path""::TEXT FROM ""Nodes"" N
+WHERE N.""ParentNodeId"" IN (
+    SELECT N2.""NodeId"" FROM _paths C
+    LEFT OUTER JOIN ""Nodes"" N2 ON C.""Path"" = N2.""Path""
+    WHERE N2.""Path"" IS NOT NULL
+)";
+                }
+                else
+                {
+                    // Return all matching NodeIds
+                    return $@"WITH _paths AS ({pathsCte})
+SELECT N.""NodeId"" FROM _paths C
+LEFT OUTER JOIN ""Nodes"" N ON C.""Path"" = N.""Path""
+WHERE N.""Path"" IS NOT NULL
+ORDER BY C.""Id""";
+                }
+            }
+            else
+            {
+                // Return first matching NodeId only
+                return $@"WITH _paths AS ({pathsCte})
+SELECT N.""NodeId"" FROM _paths C
+LEFT OUTER JOIN ""Nodes"" N ON C.""Path"" = N.""Path""
+WHERE N.""Path"" IS NOT NULL
+ORDER BY C.""Id""
+LIMIT 1";
+            }
+        }
+
+        // =============================================================================================== Provider Tools
+
+        /// <summary>
+        /// Rounds a DateTime down to 10 ms granularity (100000 ticks) so values survive a
+        /// database round trip with identical precision.
+        /// FIX: preserve the DateTimeKind of the input; the ticks-only constructor would
+        /// silently return a DateTimeKind.Unspecified value.
+        /// </summary>
+        public override DateTime RoundDateTime(DateTime d)
+        {
+            return new DateTime(d.Ticks / 100000 * 100000, d.Kind);
+        }
+
+        // =============================================================================================== Health
+
+        /// <summary>
+        /// Returns a redacted configuration snapshot for the health dashboard.
+        /// The raw connection string is never exposed, only whether it is set.
+        /// </summary>
+        public override object GetConfigurationForHealthDashboard()
+        {
+            return new
+            {
+                Repository = _connectionOptions.Value.Repository != null
+                    ? 
"(configured)" : "(not configured)",
+                _dataOptions.DbCommandTimeout,
+                _dataOptions.TransactionTimeout,
+                _dataOptions.LongTransactionTimeout
+            };
+        }
+
+        /// <summary>
+        /// Health check: loads the Path of NodeId = 1 and measures the elapsed time.
+        /// Green: value loaded; Yellow: query succeeded but returned no usable value;
+        /// Red: the query threw. FIX: return type restored to Task&lt;HealthResult&gt;
+        /// (the method returns a HealthResult; the generic argument was lost).
+        /// </summary>
+        public override async Task<HealthResult> GetHealthAsync(CancellationToken cancel)
+        {
+            object data = null;
+            string error = null;
+            TimeSpan? elapsed = null;
+
+            try
+            {
+                var timer = System.Diagnostics.Stopwatch.StartNew();
+                var sql = "SELECT \"Path\"::TEXT FROM \"Nodes\" WHERE \"NodeId\" = 1";
+                using var ctx = CreateDataContext(cancel);
+                data = await ctx.ExecuteScalarAsync(sql).ConfigureAwait(false);
+                timer.Stop();
+                elapsed = timer.Elapsed;
+            }
+            catch (Exception e)
+            {
+                // The error is reported through the health result, not rethrown.
+                error = e.Message;
+            }
+
+            HealthResult result;
+            if (error != null)
+            {
+                result = new HealthResult
+                {
+                    Color = HealthColor.Red,
+                    Reason = $"ERROR: {error}",
+                    Method = "Trying to load first Node's Path."
+                };
+            }
+            else if (data == null || data == DBNull.Value)
+            {
+                result = new HealthResult
+                {
+                    Color = HealthColor.Yellow,
+                    Reason = "Invalid data",
+                    Method = "Trying to interpret the loaded first Node's Path."
+                };
+            }
+            else
+            {
+                result = new HealthResult
+                {
+                    Color = HealthColor.Green,
+                    ResponseTime = elapsed,
+                    Method = "Measure time of loading first Node's Path in secs."
+                };
+            }
+
+            return result;
+        }
+
+        // =============================================================================================== Script execution
+
+        /// <summary>
+        /// Loads an embedded SQL resource from this assembly and executes it as a single batch.
+        /// Throws InvalidOperationException if the resource is missing.
+        /// </summary>
+        private async System.Threading.Tasks.Task ExecuteEmbeddedNonQueryScriptAsync(string scriptName, CancellationToken cancellationToken)
+        {
+            using var op = SnTrace.Database.StartOperation("PgSqlDataProvider: " +
+                "ExecuteEmbeddedNonQueryScript(scriptName: {0})", scriptName);
+
+            using var stream = GetType().Assembly.GetManifestResourceStream(scriptName);
+            if (stream == null)
+                throw new InvalidOperationException($"Embedded resource {scriptName} not found.");
+
+            using var sr = new System.IO.StreamReader(stream);
+            var script = await sr.ReadToEndAsync().ConfigureAwait(false);
+
+            using var ctx = CreateDataContext(cancellationToken);
+            await ctx.ExecuteNonQueryAsync(script).ConfigureAwait(false);
+
+            op.Successful = true;
+        }
+
+        // =============================================================================================== Retry
+
+        /// <summary>
+        /// Decides whether a failed database operation should be retried: connection-pool
+        /// exhaustion, transient Npgsql errors and a few connection-level SQLSTATEs.
+        /// </summary>
+        protected override bool ShouldRetryOnError(Exception ex)
+        {
+            if (ex is InvalidOperationException && ex.Message.Contains("connection from the pool"))
+                return true;
+            if (ex is NpgsqlException npgEx && (
+                npgEx.IsTransient ||
+                npgEx.SqlState == "08006" /* connection_failure */ ||
+                npgEx.SqlState == "08001" /* sqlclient_unable_to_establish_sqlconnection */ ||
+                npgEx.SqlState == "57P01" /* admin_shutdown */))
+                return true;
+            return false;
+        }
+    }
+}
diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlDataProviderScripts.cs b/src/ContentRepository.PostgreSql/Data/PgSqlDataProviderScripts.cs
new file mode 100644
index 000000000..c195b7614
--- /dev/null
+++ b/src/ContentRepository.PostgreSql/Data/PgSqlDataProviderScripts.cs
@@ -0,0 +1,1001 @@
+using System.Collections.Generic;
+using System.Text;
+
+// ReSharper disable once CheckNamespace
+namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient
+{
+    /// 
+    /// Contains all SQL scripts for the PostgreSQL data provider, 
converted from T-SQL. + /// Each script is carefully matched to the column order and parameter names + /// expected by RelationalDataProviderBase. + /// + public partial class PgSqlDataProvider + { + // =============================================================================================== Node Insert + + #region InsertNodeAndVersionScript + // Base class params (Insert): + // Node: @NodeTypeId, @ContentListTypeId, @ContentListId, @CreatingInProgress, @IsDeleted, @IsInherited, + // @ParentNodeId, @Name, @DisplayName, @Path, @Index, @Locked, @LockedById, + // @ETag, @LockType, @LockTimeout, @LockDate, @LockToken, @LastLockUpdate, + // @NodeCreationDate, @NodeCreatedById, @NodeModificationDate, @NodeModifiedById, + // @IsSystem, @OwnerId, @SavingState, @ChangedData + // Version: @MajorNumber, @MinorNumber, @Status, + // @VersionCreationDate, @VersionCreatedById, @VersionModificationDate, @VersionModifiedById, + // @DynamicProperties, @ContentListProperties + // Result (by name): NodeId, NodeTimestamp, LastMajorVersionId, LastMinorVersionId, VersionId, VersionTimestamp + protected override string InsertNodeAndVersionScript => @"-- PgSqlDataProvider.InsertNodeAndVersion +SELECT * FROM sn_insert_node_and_version( + @NodeTypeId, @ContentListTypeId, @ContentListId, @CreatingInProgress, @IsDeleted, @IsInherited, + @ParentNodeId, @Name, @DisplayName, @Path, @Index, @Locked, @LockedById, + @ETag, @LockType, @LockTimeout, @LockDate, @LockToken, @LastLockUpdate, + @NodeCreationDate, @NodeCreatedById, @NodeModificationDate, @NodeModifiedById, + @IsSystem, @OwnerId, @SavingState, + @MajorNumber, @MinorNumber, @Status, @ChangedData, + @VersionCreationDate, @VersionCreatedById, @VersionModificationDate, @VersionModifiedById, + @DynamicProperties, @ContentListProperties +); +"; + #endregion + + #region InsertReferencePropertiesHeadScript + // Base class uses .Append() (not formatted). Params: @VersionId + // MsSql declares a table variable here. 
For PgSql, we handle in each per-property script. + protected override string InsertReferencePropertiesHeadScript => @"-- PgSqlDataProvider.InsertReferencePropertiesHead +DELETE FROM ""ReferenceProperties"" WHERE ""VersionId"" = @VersionId; +"; + #endregion + + #region InsertReferencePropertiesScript + // Base class uses .AppendFormat(script, index). + // Params per index: @PropertyTypeId{N}, @ReferredNodeIds{N} (comma-separated string) + protected override string InsertReferencePropertiesScript => @" +INSERT INTO ""ReferenceProperties"" (""VersionId"", ""PropertyTypeId"", ""ReferredNodeId"") +SELECT @VersionId, @PropertyTypeId{0}, unnest(string_to_array(@ReferredNodeIds{0}, ','))::int +WHERE COALESCE(@ReferredNodeIds{0}, '') <> ''; +"; + #endregion + + #region InsertLongtextPropertiesHeadScript + // Base class uses .Append() (not formatted). No content needed. + protected override string InsertLongtextPropertiesHeadScript => @"-- PgSqlDataProvider.InsertLongtextPropertiesHead +"; + #endregion + + #region InsertLongtextPropertiesScript + // Base class uses .AppendFormat(script, index). 
+ // Params per index: @PropertyTypeId{N}, @Length{N}, @Value{N} + protected override string InsertLongtextPropertiesScript => @"INSERT INTO ""LongTextProperties"" + (""VersionId"", ""PropertyTypeId"", ""Length"", ""Value"") VALUES + (@VersionId, @PropertyTypeId{0}, @Length{0}, @Value{0}); +"; + #endregion + + // =============================================================================================== Node Update + + #region UpdateVersionScript + // Base class params: @VersionId, @NodeId, @MajorNumber, @MinorNumber, @Status, + // @CreationDate, @CreatedById, @ModificationDate, @ModifiedById, + // @ChangedData, @DynamicProperties, @ContentListProperties + // Return: ExecuteScalar -> single Timestamp value + protected override string UpdateVersionScript => @"-- PgSqlDataProvider.UpdateVersion +UPDATE ""Versions"" SET + ""NodeId"" = @NodeId, + ""MajorNumber"" = @MajorNumber, + ""MinorNumber"" = @MinorNumber, + ""CreationDate"" = @CreationDate, + ""CreatedById"" = @CreatedById, + ""ModificationDate"" = @ModificationDate, + ""ModifiedById"" = @ModifiedById, + ""Status"" = @Status, + ""ChangedData"" = @ChangedData, + ""DynamicProperties"" = @DynamicProperties, + ""ContentListProperties"" = @ContentListProperties +WHERE ""VersionId"" = @VersionId +RETURNING ""Timestamp"" +"; + #endregion + + #region UpdateNodeScript + // Base class params: @NodeId, @NodeTypeId, @ContentListTypeId, @ContentListId, + // @CreatingInProgress, @IsDeleted, @IsInherited, @ParentNodeId, @Name, @DisplayName, @Path, + // @Index, @Locked, @LockedById, @ETag, @LockType, @LockTimeout, @LockDate, @LockToken, @LastLockUpdate, + // @CreationDate, @CreatedById, @ModificationDate, @ModifiedById, + // @IsSystem, @OwnerId, @SavingState, @NodeTimestamp (binary) + // Return: ExecuteScalar -> single Timestamp value + protected override string UpdateNodeScript => @"-- PgSqlDataProvider.UpdateNode +UPDATE ""Nodes"" SET + ""NodeTypeId"" = @NodeTypeId, + ""ContentListTypeId"" = @ContentListTypeId, + 
""ContentListId"" = @ContentListId, + ""CreatingInProgress"" = @CreatingInProgress, + ""IsDeleted"" = @IsDeleted, + ""IsInherited"" = @IsInherited, + ""ParentNodeId"" = @ParentNodeId, + ""Name"" = @Name, + ""DisplayName"" = @DisplayName, + ""Path"" = @Path, + ""Index"" = @Index, + ""Locked"" = @Locked, + ""LockedById"" = @LockedById, + ""ETag"" = @ETag, + ""LockType"" = @LockType, + ""LockTimeout"" = @LockTimeout, + ""LockDate"" = @LockDate, + ""LockToken"" = @LockToken, + ""LastLockUpdate"" = @LastLockUpdate, + ""CreationDate"" = @CreationDate, + ""CreatedById"" = @CreatedById, + ""ModificationDate"" = @ModificationDate, + ""ModifiedById"" = @ModifiedById, + ""IsSystem"" = @IsSystem, + ""OwnerId"" = @OwnerId, + ""SavingState"" = @SavingState +WHERE ""NodeId"" = @NodeId AND ""Timestamp"" = @NodeTimestamp +RETURNING ""Timestamp"" +"; + #endregion + + #region UpdateSubTreePathScript + protected override string UpdateSubTreePathScript => @"-- PgSqlDataProvider.UpdateSubTreePath +UPDATE ""Nodes"" SET ""Path"" = @NewPath || SUBSTRING(""Path""::TEXT FROM LENGTH(@OldPath::TEXT) + 1) +WHERE ""Path"" LIKE @OldPath || '/%' +"; + #endregion + + #region ManageLastVersionsScript + // Base class params: @NodeId, @VersionIds (comma-separated or DBNull) + // Result (by name): NodeTimestamp, LastMajorVersionId, LastMinorVersionId + protected override string ManageLastVersionsScript => @"-- PgSqlDataProvider.ManageLastVersions +-- Delete versions if @VersionIds is provided (string_to_array(NULL,...) 
returns NULL -> no rows matched) +DELETE FROM ""LongTextProperties"" WHERE ""VersionId"" = ANY(string_to_array(@VersionIds, ',')::int[]); +DELETE FROM ""ReferenceProperties"" WHERE ""VersionId"" = ANY(string_to_array(@VersionIds, ',')::int[]); +DELETE FROM ""Versions"" WHERE ""VersionId"" = ANY(string_to_array(@VersionIds, ',')::int[]); + +-- Recalculate last versions +UPDATE ""Nodes"" SET + ""LastMinorVersionId"" = ( + SELECT ""VersionId"" FROM ""Versions"" + WHERE ""NodeId"" = @NodeId + ORDER BY ""MajorNumber"" DESC, ""MinorNumber"" DESC + LIMIT 1), + ""LastMajorVersionId"" = ( + SELECT ""VersionId"" FROM ""Versions"" + WHERE ""NodeId"" = @NodeId AND ""MinorNumber"" = 0 AND ""Status"" = 1 + ORDER BY ""MajorNumber"" DESC, ""MinorNumber"" DESC + LIMIT 1) +WHERE ""NodeId"" = @NodeId +RETURNING ""Timestamp"" AS ""NodeTimestamp"", ""LastMajorVersionId"", ""LastMinorVersionId"" +"; + #endregion + + #region UpdateReferencePropertiesHeadScript + // Base class uses .Append() (not formatted). Params: @VersionId + protected override string UpdateReferencePropertiesHeadScript => @"-- PgSqlDataProvider.UpdateReferencePropertiesHead +"; + #endregion + + #region UpdateReferencePropertiesScript + // Base class uses .AppendFormat(script, index). + // Params per index: @PropertyTypeId{N}, @ReferredNodeIds{N} (comma-separated string) + protected override string UpdateReferencePropertiesScript => @"-- +DELETE FROM ""ReferenceProperties"" WHERE ""VersionId"" = @VersionId AND ""PropertyTypeId"" = @PropertyTypeId{0}; +INSERT INTO ""ReferenceProperties"" (""VersionId"", ""PropertyTypeId"", ""ReferredNodeId"") +SELECT @VersionId, @PropertyTypeId{0}, unnest(string_to_array(@ReferredNodeIds{0}, ','))::int +WHERE COALESCE(@ReferredNodeIds{0}, '') <> ''; +"; + #endregion + + #region UpdateLongtextPropertiesHeadScript + // Base class uses .Append() (not formatted). No format placeholder allowed here! 
+ protected override string UpdateLongtextPropertiesHeadScript => @"-- PgSqlDataProvider.UpdateLongtextPropertiesHead +"; + #endregion + + #region UpdateLongtextPropertiesScript + // Base class uses .AppendFormat(script, index). + // Params per index: @PropertyTypeId{N}, @Length{N}, @Value{N} + protected override string UpdateLongtextPropertiesScript => @"-- PgSqlDataProvider.UpdateLongtextProperties +DELETE FROM ""LongTextProperties"" WHERE ""VersionId"" = @VersionId AND ""PropertyTypeId"" = @PropertyTypeId{0}; +INSERT INTO ""LongTextProperties"" + (""VersionId"", ""PropertyTypeId"", ""Length"", ""Value"") VALUES + (@VersionId, @PropertyTypeId{0}, @Length{0}, @Value{0}); +"; + #endregion + + // =============================================================================================== Copy Version + + #region CopyVersionAndUpdateScript + // Base class params: @PreviousVersionId, @DestinationVersionId (int or DBNull), + // @NodeId, @MajorNumber, @MinorNumber, @Status, + // @CreationDate, @CreatedById, @ModificationDate, @ModifiedById, + // @ChangedData, @DynamicProperties, @ContentListProperties + // Result set 1 (by name): VersionId, Timestamp + // Result set 2 (by name): BinaryPropertyId, PropertyTypeId + protected override string CopyVersionAndUpdateScript => @"-- PgSqlDataProvider.CopyVersionAndUpdate +-- Set older locked versions to Draft +UPDATE ""Versions"" SET ""Status"" = 4 WHERE ""NodeId"" = @NodeId AND ""Status"" = 2; + +-- Insert or update version +WITH target_version AS ( + -- Insert new version if @DestinationVersionId is NULL + INSERT INTO ""Versions"" + (""NodeId"", ""MajorNumber"", ""MinorNumber"", ""CreationDate"", ""CreatedById"", + ""ModificationDate"", ""ModifiedById"", ""Status"", ""ChangedData"", + ""DynamicProperties"", ""ContentListProperties"") + SELECT + @NodeId, @MajorNumber, @MinorNumber, @CreationDate, @CreatedById, + @ModificationDate, @ModifiedById, @Status, @ChangedData, + @DynamicProperties, @ContentListProperties + WHERE 
@DestinationVersionId IS NULL + RETURNING ""VersionId"", ""Timestamp"" +) +SELECT ""VersionId"", ""Timestamp"" FROM target_version; + +-- If @DestinationVersionId is not NULL, update existing version +UPDATE ""Versions"" SET + ""NodeId"" = @NodeId, + ""MajorNumber"" = @MajorNumber, + ""MinorNumber"" = @MinorNumber, + ""CreationDate"" = @CreationDate, + ""CreatedById"" = @CreatedById, + ""ModificationDate"" = @ModificationDate, + ""ModifiedById"" = @ModifiedById, + ""Status"" = @Status, + ""ChangedData"" = @ChangedData, + ""DynamicProperties"" = @DynamicProperties, + ""ContentListProperties"" = @ContentListProperties +WHERE ""VersionId"" = @DestinationVersionId AND @DestinationVersionId IS NOT NULL +RETURNING ""VersionId"", ""Timestamp""; + +-- Copy properties from previous version +INSERT INTO ""BinaryProperties"" (""VersionId"", ""PropertyTypeId"", ""FileId"") + SELECT COALESCE(@DestinationVersionId, currval('""Versions_VersionId_seq""')), ""PropertyTypeId"", ""FileId"" + FROM ""BinaryProperties"" WHERE ""VersionId"" = @PreviousVersionId; +INSERT INTO ""ReferenceProperties"" (""VersionId"", ""PropertyTypeId"", ""ReferredNodeId"") + SELECT COALESCE(@DestinationVersionId, currval('""Versions_VersionId_seq""')), ""PropertyTypeId"", ""ReferredNodeId"" + FROM ""ReferenceProperties"" WHERE ""VersionId"" = @PreviousVersionId; +INSERT INTO ""LongTextProperties"" (""VersionId"", ""PropertyTypeId"", ""Length"", ""Value"") + SELECT COALESCE(@DestinationVersionId, currval('""Versions_VersionId_seq""')), ""PropertyTypeId"", ""Length"", ""Value"" + FROM ""LongTextProperties"" WHERE ""VersionId"" = @PreviousVersionId; + +-- Return binary properties info (result set 2) +SELECT B.""BinaryPropertyId"", B.""PropertyTypeId"" FROM ""BinaryProperties"" B + JOIN ""Files"" F ON B.""FileId"" = F.""FileId"" +WHERE B.""VersionId"" = COALESCE(@DestinationVersionId, currval('""Versions_VersionId_seq""')) + AND F.""Staging"" IS NULL +"; + #endregion + + // 
=============================================================================================== Changed Data + + #region LoadChangedDataScript + protected override string LoadChangedDataScript => @"-- PgSqlDataProvider.LoadChangedData +SELECT ""ChangedData"" FROM ""Versions"" WHERE ""VersionId"" = @VersionId +"; + #endregion + + #region SaveChangedDataScript + protected override string SaveChangedDataScript => @"-- PgSqlDataProvider.SaveChangedData +UPDATE ""Versions"" SET ""ChangedData"" = @ChangedData WHERE ""VersionId"" = @VersionId +RETURNING ""Timestamp"" +"; + #endregion + + // =============================================================================================== Load Nodes + + #region LoadNodesScript + // Base class params: @VersionIds (comma-separated string), @LongTextMaxSize (int) + // 4 result sets: BaseData (joined Node+Version), Binary, Reference, LongText + // BaseData columns read BY NAME: NodeId, NodeTypeId, ContentListTypeId, ContentListId, + // CreatingInProgress, IsDeleted, ParentNodeId, Name, DisplayName, Path, Index, + // Locked, LockedById, ETag, LockType, LockTimeout, LockDate, LockToken, LastLockUpdate, + // NodeCreationDate, NodeCreatedById, NodeModificationDate, NodeModifiedById, + // IsSystem, OwnerId, SavingState, ChangedData, NodeTimestamp, + // VersionId, MajorNumber, MinorNumber, CreationDate, CreatedById, + // ModificationDate, ModifiedById, Status, VersionTimestamp, + // DynamicProperties, ContentListProperties + protected override string LoadNodesScript => @"-- PgSqlDataProvider.LoadNodes +-- BaseData (joined Node+Version) +SELECT N.""NodeId"", N.""NodeTypeId"", N.""ContentListTypeId"", N.""ContentListId"", + N.""CreatingInProgress"", N.""IsDeleted"", N.""ParentNodeId"", + N.""Name"", N.""DisplayName"", N.""Path""::TEXT, N.""Index"", N.""Locked"", N.""LockedById"", + N.""ETag"", N.""LockType"", N.""LockTimeout"", N.""LockDate"", N.""LockToken"", N.""LastLockUpdate"", + N.""CreationDate"" AS ""NodeCreationDate"", 
N.""CreatedById"" AS ""NodeCreatedById"", + N.""ModificationDate"" AS ""NodeModificationDate"", N.""ModifiedById"" AS ""NodeModifiedById"", + N.""IsSystem"", N.""OwnerId"", + N.""SavingState"", V.""ChangedData"", + N.""Timestamp"" AS ""NodeTimestamp"", + V.""VersionId"", V.""MajorNumber"", V.""MinorNumber"", V.""CreationDate"", V.""CreatedById"", + V.""ModificationDate"", V.""ModifiedById"", V.""Status"", + V.""Timestamp"" AS ""VersionTimestamp"", + V.""DynamicProperties"", V.""ContentListProperties"" +FROM ""Nodes"" N + INNER JOIN ""Versions"" V ON N.""NodeId"" = V.""NodeId"" +WHERE V.""VersionId"" = ANY(string_to_array(@VersionIds, ',')::int[]); + +-- BinaryProperties +SELECT B.""BinaryPropertyId"", B.""VersionId"", B.""PropertyTypeId"", + F.""FileId"", F.""ContentType"", F.""FileNameWithoutExtension"", + F.""Extension"", F.""Size"", F.""BlobProvider"", F.""BlobProviderData"", + F.""Checksum"", NULL AS ""Stream"", 0 AS ""Loaded"", F.""Timestamp"" +FROM ""BinaryProperties"" B + JOIN ""Files"" F ON B.""FileId"" = F.""FileId"" +WHERE B.""VersionId"" = ANY(string_to_array(@VersionIds, ',')::int[]) AND F.""Staging"" IS NULL; + +-- ReferenceProperties +SELECT ""VersionId"", ""PropertyTypeId"", ""ReferredNodeId"" FROM ""ReferenceProperties"" +WHERE ""VersionId"" = ANY(string_to_array(@VersionIds, ',')::int[]); + +-- LongTextProperties +SELECT ""VersionId"", ""PropertyTypeId"", ""Length"", ""Value"" FROM ""LongTextProperties"" +WHERE ""VersionId"" = ANY(string_to_array(@VersionIds, ',')::int[]) AND ""Length"" < @LongTextMaxSize; +"; + #endregion + + // =============================================================================================== Delete Node + + #region DeleteNodeScript + // Base class params: @NodeId, @Timestamp (binary, can be null), @PartitionSize + protected override string DeleteNodeScript => @"-- PgSqlDataProvider.DeleteNode +DELETE FROM ""LongTextProperties"" WHERE ""VersionId"" IN ( + SELECT ""VersionId"" FROM ""Versions"" WHERE ""NodeId"" IN ( + 
SELECT ""NodeId"" FROM ""Nodes"" WHERE ""NodeId"" = @NodeId OR ""Path"" LIKE ( + SELECT ""Path"" FROM ""Nodes"" WHERE ""NodeId"" = @NodeId) || '/%')); +DELETE FROM ""ReferenceProperties"" WHERE ""VersionId"" IN ( + SELECT ""VersionId"" FROM ""Versions"" WHERE ""NodeId"" IN ( + SELECT ""NodeId"" FROM ""Nodes"" WHERE ""NodeId"" = @NodeId OR ""Path"" LIKE ( + SELECT ""Path"" FROM ""Nodes"" WHERE ""NodeId"" = @NodeId) || '/%')); +DELETE FROM ""BinaryProperties"" WHERE ""VersionId"" IN ( + SELECT ""VersionId"" FROM ""Versions"" WHERE ""NodeId"" IN ( + SELECT ""NodeId"" FROM ""Nodes"" WHERE ""NodeId"" = @NodeId OR ""Path"" LIKE ( + SELECT ""Path"" FROM ""Nodes"" WHERE ""NodeId"" = @NodeId) || '/%')); +DELETE FROM ""Versions"" WHERE ""NodeId"" IN ( + SELECT ""NodeId"" FROM ""Nodes"" WHERE ""NodeId"" = @NodeId OR ""Path"" LIKE ( + SELECT ""Path"" FROM ""Nodes"" WHERE ""NodeId"" = @NodeId) || '/%'); +DELETE FROM ""Nodes"" WHERE ""NodeId"" = @NodeId OR ""Path"" LIKE ( + SELECT ""Path"" FROM ""Nodes"" WHERE ""NodeId"" = @NodeId) || '/%'; +"; + #endregion + + // =============================================================================================== Move Node + + #region MoveNodeScript + protected override string MoveNodeScript => @"-- PgSqlDataProvider.MoveNode +UPDATE ""Nodes"" + SET ""ParentNodeId"" = @TargetId, + ""Path"" = @TargetPath || '/' || ""Name"" + WHERE ""NodeId"" = @SourceId AND ""Timestamp"" = @SourceTimestamp + RETURNING ""Timestamp"" +"; + #endregion + + // =============================================================================================== Text properties + + #region LoadTextPropertyValuesScript + protected override string LoadTextPropertyValuesScript => @"-- PgSqlDataProvider.LoadTextPropertyValues +SELECT ""PropertyTypeId"", ""Value"" FROM ""LongTextProperties"" +WHERE ""VersionId"" = @VersionId AND ""PropertyTypeId"" IN ({0}) +"; + #endregion + + // 
=============================================================================================== Node exists / Head + + #region NodeExistsScript + protected override string NodeExistsScript => @"-- PgSqlDataProvider.NodeExists +SELECT CASE WHEN EXISTS (SELECT 1 FROM ""Nodes"" WHERE ""Path"" = @Path) THEN 1 ELSE 0 END +"; + #endregion + + #region LoadNodeHeadByPathScript + protected override string LoadNodeHeadByPathScript => @"-- PgSqlDataProvider.LoadNodeHeadByPath +SELECT + ""NodeId"", -- 0 + ""Name"", -- 1 + ""DisplayName"", -- 2 + ""Path""::TEXT, -- 3 + ""ParentNodeId"", -- 4 + ""NodeTypeId"", -- 5 + ""ContentListTypeId"", -- 6 + ""ContentListId"", -- 7 + ""CreationDate"", -- 8 + ""ModificationDate"", -- 9 + ""LastMinorVersionId"", -- 10 + ""LastMajorVersionId"", -- 11 + ""OwnerId"", -- 12 + ""CreatedById"", -- 13 + ""ModifiedById"", -- 14 + ""Index"", -- 15 + ""LockedById"", -- 16 + ""Timestamp"" -- 17 +FROM ""Nodes"" +WHERE ""Path"" = @Path +"; + #endregion + + #region LoadNodeHeadByIdScript + protected override string LoadNodeHeadByIdScript => @"-- PgSqlDataProvider.LoadNodeHeadById +SELECT + ""NodeId"", -- 0 + ""Name"", -- 1 + ""DisplayName"", -- 2 + ""Path""::TEXT, -- 3 + ""ParentNodeId"", -- 4 + ""NodeTypeId"", -- 5 + ""ContentListTypeId"", -- 6 + ""ContentListId"", -- 7 + ""CreationDate"", -- 8 + ""ModificationDate"", -- 9 + ""LastMinorVersionId"", -- 10 + ""LastMajorVersionId"", -- 11 + ""OwnerId"", -- 12 + ""CreatedById"", -- 13 + ""ModifiedById"", -- 14 + ""Index"", -- 15 + ""LockedById"", -- 16 + ""Timestamp"" -- 17 +FROM ""Nodes"" +WHERE ""NodeId"" = @NodeId +"; + #endregion + + #region LoadNodeHeadByVersionIdScript + protected override string LoadNodeHeadByVersionIdScript => @"-- PgSqlDataProvider.LoadNodeHeadByVersionId +SELECT + N.""NodeId"", -- 0 + N.""Name"", -- 1 + N.""DisplayName"", -- 2 + N.""Path""::TEXT, -- 3 + N.""ParentNodeId"", -- 4 + N.""NodeTypeId"", -- 5 + N.""ContentListTypeId"", -- 6 + N.""ContentListId"", -- 7 + N.""CreationDate"", 
-- 8 + N.""ModificationDate"", -- 9 + N.""LastMinorVersionId"", -- 10 + N.""LastMajorVersionId"", -- 11 + N.""OwnerId"", -- 12 + N.""CreatedById"", -- 13 + N.""ModifiedById"", -- 14 + N.""Index"", -- 15 + N.""LockedById"", -- 16 + N.""Timestamp"" -- 17 +FROM ""Nodes"" N JOIN ""Versions"" V ON N.""NodeId"" = V.""NodeId"" +WHERE V.""VersionId"" = @VersionId +"; + #endregion + + #region LoadNodeHeadsByIdSetScript + protected override string LoadNodeHeadsByIdSetScript => @"-- PgSqlDataProvider.LoadNodeHeadsByIdSet +SELECT + ""NodeId"", -- 0 + ""Name"", -- 1 + ""DisplayName"", -- 2 + ""Path""::TEXT, -- 3 + ""ParentNodeId"", -- 4 + ""NodeTypeId"", -- 5 + ""ContentListTypeId"", -- 6 + ""ContentListId"", -- 7 + ""CreationDate"", -- 8 + ""ModificationDate"", -- 9 + ""LastMinorVersionId"", -- 10 + ""LastMajorVersionId"", -- 11 + ""OwnerId"", -- 12 + ""CreatedById"", -- 13 + ""ModifiedById"", -- 14 + ""Index"", -- 15 + ""LockedById"", -- 16 + ""Timestamp"" -- 17 +FROM ""Nodes"" +WHERE ""NodeId"" = ANY(string_to_array(@NodeIds, ',')::int[]) +"; + #endregion + + // =============================================================================================== Versions + + #region GetVersionNumbersByNodeIdScript + protected override string GetVersionNumbersByNodeIdScript => @"-- PgSqlDataProvider.GetVersionNumbersByNodeId +SELECT ""VersionId"", ""MajorNumber"", ""MinorNumber"", ""Status"" FROM ""Versions"" +WHERE ""NodeId"" = @NodeId ORDER BY ""MajorNumber"", ""MinorNumber"" +"; + #endregion + + #region GetVersionNumbersByPathScript + protected override string GetVersionNumbersByPathScript => @"-- PgSqlDataProvider.GetVersionNumbersByPath +SELECT V.""VersionId"", V.""MajorNumber"", V.""MinorNumber"", V.""Status"" +FROM ""Versions"" V JOIN ""Nodes"" N ON V.""NodeId"" = N.""NodeId"" +WHERE N.""Path"" = @Path +ORDER BY V.""MajorNumber"", V.""MinorNumber"" +"; + #endregion + + // =============================================================================================== 
NodeQuery + + #region InstanceCountScript + protected override string InstanceCountScript => @"-- PgSqlDataProvider.InstanceCount +SELECT COUNT(*) FROM ""Nodes"" WHERE ""NodeTypeId"" IN ({0}) +"; + #endregion + + #region GetChildrenIdentfiersScript + protected override string GetChildrenIdentfiersScript => @"-- PgSqlDataProvider.GetChildrenIdentifiers +SELECT ""NodeId"" FROM ""Nodes"" WHERE ""ParentNodeId"" = @ParentNodeId +"; + #endregion + + #region QueryNodesByReferenceScript + protected override string QueryNodesByReferenceScript => @"-- PgSqlDataProvider.QueryNodesByReference +SELECT V.""NodeId"" FROM ""ReferenceProperties"" R + JOIN ""Versions"" V ON R.""VersionId"" = V.""VersionId"" + JOIN ""Nodes"" N ON V.""VersionId"" = N.""LastMinorVersionId"" +WHERE R.""PropertyTypeId"" = @PropertyTypeId AND R.""ReferredNodeId"" = @ReferredNodeId +"; + #endregion + + #region QueryNodesByReferenceAndTypeScript + protected override string QueryNodesByReferenceAndTypeScript => @"-- PgSqlDataProvider.QueryNodesByReferenceAndType +SELECT N.""NodeId"" FROM ""ReferenceProperties"" R + JOIN ""Versions"" V ON R.""VersionId"" = V.""VersionId"" + JOIN ""Nodes"" N ON V.""VersionId"" = N.""LastMinorVersionId"" +WHERE R.""PropertyTypeId"" = @PropertyTypeId AND R.""ReferredNodeId"" = @ReferredNodeId + AND N.""NodeTypeId"" IN ({0}) +"; + #endregion + + // =============================================================================================== ContentList + + #region LoadChildTypesToAllowScript + protected override string LoadChildTypesToAllowScript => @"-- PgSqlDataProvider.LoadChildTypesToAllow +SELECT DISTINCT N.""NodeTypeId"" FROM ""Nodes"" N WHERE N.""ParentNodeId"" = @NodeId +"; + #endregion + + #region GetContentListTypesInTreeScript + protected override string GetContentListTypesInTreeScript => @"-- PgSqlDataProvider.GetContentListTypesInTree +SELECT DISTINCT N.""ContentListTypeId"" FROM ""Nodes"" N +WHERE N.""ContentListTypeId"" IS NOT NULL + AND N.""ContentListId"" IS 
NULL + AND N.""Path"" LIKE @Path || '/%' +"; + #endregion + + // =============================================================================================== TreeLock + + #region AcquireTreeLockScript + protected override string AcquireTreeLockScript => @"-- PgSqlDataProvider.AcquireTreeLock +INSERT INTO ""TreeLocks"" (""Path"", ""LockedAt"") +SELECT @Path0, NOW() AT TIME ZONE 'UTC' +WHERE NOT EXISTS ( + SELECT 1 FROM ""TreeLocks"" + WHERE @TimeMin < ""LockedAt"" AND ( + ""Path"" LIKE @Path0 || '/%' OR + ""Path"" IN ( {0} )) +) +RETURNING ""TreeLockId"" +"; + #endregion + + #region IsTreeLockedScript + protected override string IsTreeLockedScript => @"-- PgSqlDataProvider.IsTreeLocked +SELECT ""TreeLockId"" FROM ""TreeLocks"" +WHERE @TimeLimit < ""LockedAt"" AND ( + ""Path"" LIKE @Path0 || '/%' OR + ""Path"" IN ( {0} )) +LIMIT 1 +"; + #endregion + + #region ReleaseTreeLockScript + protected override string ReleaseTreeLockScript => @"-- PgSqlDataProvider.ReleaseTreeLock +DELETE FROM ""TreeLocks"" WHERE ""TreeLockId"" IN ({0}) +"; + #endregion + + #region LoadAllTreeLocksScript + protected override string LoadAllTreeLocksScript => @"-- PgSqlDataProvider.LoadAllTreeLocks +SELECT ""TreeLockId"", ""Path"" FROM ""TreeLocks"" +"; + #endregion + + #region DeleteUnusedLocksScript + protected override string DeleteUnusedLocksScript => @"-- PgSqlDataProvider.DeleteUnusedLocks +DELETE FROM ""TreeLocks"" WHERE ""LockedAt"" < @TimeMin +"; + #endregion + + // =============================================================================================== IndexDocument + + #region SaveIndexDocumentScript + protected override string SaveIndexDocumentScript => @"-- PgSqlDataProvider.SaveIndexDocument +UPDATE ""Versions"" SET ""IndexDocument"" = @IndexDocument WHERE ""VersionId"" = @VersionId +RETURNING ""Timestamp"" +"; + #endregion + + #region LoadIndexDocumentsCommonColumns + // GetIndexDocumentDataFromReader reads by name: + // NodeTypeId, VersionId, NodeId, ParentNodeId, Path, 
IsSystem, + // LastMinorVersionId, LastMajorVersionId, Status, IndexDocument, + // NodeTimestamp, VersionTimestamp + private string LoadIndexDocumentsCommon => @"SELECT N.""NodeTypeId"", V.""VersionId"", V.""NodeId"", + N.""ParentNodeId"", N.""Path""::TEXT AS ""Path"", N.""IsSystem"", + N.""LastMinorVersionId"", N.""LastMajorVersionId"", + V.""Status"", V.""IndexDocument"", + N.""Timestamp"" AS ""NodeTimestamp"", V.""Timestamp"" AS ""VersionTimestamp"" +FROM ""Versions"" V + INNER JOIN ""Nodes"" N ON V.""NodeId"" = N.""NodeId"" +"; + #endregion + + #region LoadIndexDocumentsByVersionIdScript + // Base class params: @VersionIds (comma-separated string) + protected override string LoadIndexDocumentsByVersionIdScript => @"-- PgSqlDataProvider.LoadIndexDocumentsByVersionId +" + LoadIndexDocumentsCommon + @"WHERE V.""VersionId"" = ANY(string_to_array(@VersionIds, ',')::int[]) +"; + #endregion + + #region LoadIndexDocumentCollectionBlockByPathScript + // Base class params: @Path, @Offset, @Count + protected override string LoadIndexDocumentCollectionBlockByPathScript => @"-- PgSqlDataProvider.LoadIndexDocumentCollectionBlockByPath +" + LoadIndexDocumentsCommon + @"WHERE (N.""Path"" = @Path OR N.""Path"" LIKE @Path || '/%') +ORDER BY N.""Path"" +OFFSET @Offset ROWS FETCH NEXT @Count ROWS ONLY +"; + #endregion + + #region LoadIndexDocumentCollectionBlockByPathAndTypeScript + // Base class params: @Path, @Offset, @Count + // NOTE: MsSql uses NOT IN for type exclusion + protected override string LoadIndexDocumentCollectionBlockByPathAndTypeScript => @"-- PgSqlDataProvider.LoadIndexDocumentCollectionBlockByPathAndType +" + LoadIndexDocumentsCommon + @"WHERE N.""NodeTypeId"" NOT IN ({0}) + AND (N.""Path"" = @Path OR N.""Path"" LIKE @Path || '/%') +ORDER BY N.""Path"" +OFFSET @Offset ROWS FETCH NEXT @Count ROWS ONLY +"; + #endregion + + #region LoadNotIndexedNodeIdsScript + // Base class params: @FromId, @ToId + protected override string LoadNotIndexedNodeIdsScript => @"-- 
PgSqlDataProvider.LoadNotIndexedNodeIds +SELECT ""NodeId"" FROM ""Versions"" +WHERE ""NodeId"" >= @FromId AND ""NodeId"" <= @ToId AND ""IndexDocument"" IS NULL +"; + #endregion + + // =============================================================================================== Indexing Activity + + #region GetLastIndexingActivityIdScript + protected override string GetLastIndexingActivityIdScript => @"-- PgSqlDataProvider.GetLastIndexingActivityId +SELECT COALESCE(MAX(""IndexingActivityId""), 0) FROM ""IndexingActivities"" +"; + #endregion + + #region DeleteRestorePointsScript + protected override string DeleteRestorePointsScript => @"-- PgSqlDataProvider.DeleteRestorePoints +DELETE FROM ""IndexingActivities"" WHERE ""ActivityType"" = 'Restore' +"; + #endregion + + #region GetCurrentIndexingActivityStatusScript + // Base class reads: column 0 = Int32 (Id), column 1 = String (State) + // First row = last done activity, remaining rows = gap activities + protected override string GetCurrentIndexingActivityStatusScript => @"-- PgSqlDataProvider.GetCurrentIndexingActivityStatus +WITH LastDone AS ( + SELECT ""IndexingActivityId"" + FROM ""IndexingActivities"" + WHERE ""RunningState"" = 'Done' + ORDER BY ""CreationDate"" DESC + LIMIT 1 +) +SELECT ld.""IndexingActivityId"", 'Done'::TEXT AS ""RunningState"" +FROM LastDone ld +UNION ALL +SELECT ia.""IndexingActivityId"", ia.""RunningState"" +FROM ""IndexingActivities"" ia, LastDone ld +WHERE ia.""RunningState"" != 'Done' AND ia.""IndexingActivityId"" < ld.""IndexingActivityId"" +"; + #endregion + + #region RestoreIndexingActivityStatusScript + // Base class: ExecuteScalar, expects string result: 'AlreadyRestored', 'Restored', or 'NotNecessary' + // Base class params: @LastActivityId (int), @Gaps (string, comma-separated) + // This complex logic requires PL/pgSQL. Create the function in schema install script, + // then call it here. 
+ protected override string RestoreIndexingActivityStatusScript => @"-- PgSqlDataProvider.RestoreIndexingActivityStatus +SELECT sn_restore_indexing_activity_status(@LastActivityId, @Gaps) +"; + #endregion + + #region LoadIndexingActivitiesSkeletonScript + // GetIndexingActivitiesFromReaderAsync reads by name: + // IndexingActivityId, ActivityType, CreationDate, RunningState, LockTime, + // NodeId, VersionId, Path, Extension, + // IndexDocument, NodeTypeId, ParentNodeId, IsSystem, + // LastMinorVersionId, LastMajorVersionId, Status, NodeTimestamp, VersionTimestamp + private string LoadIndexingActivitiesSkeleton(string trace, string where) => $@"-- PgSqlDataProvider.{trace} +SELECT I.""IndexingActivityId"", I.""ActivityType"", I.""CreationDate"", I.""RunningState"", I.""LockTime"", + I.""NodeId"", I.""VersionId"", I.""Path"", I.""Extension"", + V.""IndexDocument"", N.""NodeTypeId"", N.""ParentNodeId"", N.""IsSystem"", + N.""LastMinorVersionId"", N.""LastMajorVersionId"", V.""Status"", + N.""Timestamp"" AS ""NodeTimestamp"", V.""Timestamp"" AS ""VersionTimestamp"" +FROM ""IndexingActivities"" I + LEFT OUTER JOIN ""Versions"" V ON V.""VersionId"" = I.""VersionId"" + LEFT OUTER JOIN ""Nodes"" N ON N.""NodeId"" = V.""NodeId"" +{where} +ORDER BY I.""IndexingActivityId"" +LIMIT @Top +"; + #endregion + + #region LoadIndexingActivitiesPageScript + // Base class params: @From, @To, @Top + protected override string LoadIndexingActivitiesPageScript => + LoadIndexingActivitiesSkeleton("LoadIndexingActivitiesPage", + @"WHERE I.""IndexingActivityId"" >= @From AND I.""IndexingActivityId"" <= @To"); + #endregion + + #region LoadIndexingActivitiyGapsScript + // Base class params: @Gaps (comma-separated string), @Top + protected override string LoadIndexingActivitiyGapsScript => + LoadIndexingActivitiesSkeleton("LoadIndexingActivityGaps", + @"WHERE I.""IndexingActivityId"" = ANY(string_to_array(@Gaps, ',')::int[])"); + #endregion + + #region LoadExecutableIndexingActivitiesScript + // 
Base class params: @Top, @TimeLimit + // MsSql does UPDATE+OUTPUT atomically. For PgSql, we use UPDATE...RETURNING with subquery. + protected override string LoadExecutableIndexingActivitiesScript => @"-- PgSqlDataProvider.LoadExecutableIndexingActivities +WITH executable AS ( + SELECT I.""IndexingActivityId"" + FROM ""IndexingActivities"" I + WHERE (I.""RunningState"" = 'Waiting' OR (I.""RunningState"" = 'Running' AND I.""LockTime"" < @TimeLimit)) + AND NOT EXISTS ( + SELECT 1 FROM ""IndexingActivities"" OLD + WHERE OLD.""IndexingActivityId"" < I.""IndexingActivityId"" + AND (OLD.""RunningState"" = 'Waiting' OR OLD.""RunningState"" = 'Running') + AND ( + I.""NodeId"" = OLD.""NodeId"" OR + (I.""VersionId"" <> 0 AND I.""VersionId"" = OLD.""VersionId"") OR + I.""Path"" LIKE OLD.""Path"" || '/%' OR + OLD.""Path"" LIKE I.""Path"" || '/%' + ) + ) + ORDER BY I.""IndexingActivityId"" + LIMIT @Top +), +updated AS ( + UPDATE ""IndexingActivities"" SET ""RunningState"" = 'Running', ""LockTime"" = NOW() AT TIME ZONE 'UTC' + FROM executable e + WHERE ""IndexingActivities"".""IndexingActivityId"" = e.""IndexingActivityId"" + RETURNING ""IndexingActivities"".""IndexingActivityId"", ""IndexingActivities"".""ActivityType"", + ""IndexingActivities"".""CreationDate"", ""IndexingActivities"".""RunningState"", + ""IndexingActivities"".""LockTime"", ""IndexingActivities"".""NodeId"", + ""IndexingActivities"".""VersionId"", ""IndexingActivities"".""Path"", + ""IndexingActivities"".""Extension"" +) +SELECT u.""IndexingActivityId"", u.""ActivityType"", u.""CreationDate"", u.""RunningState"", u.""LockTime"", + u.""NodeId"", u.""VersionId"", u.""Path"", u.""Extension"", + V.""IndexDocument"", N.""NodeTypeId"", N.""ParentNodeId"", N.""IsSystem"", + N.""LastMinorVersionId"", N.""LastMajorVersionId"", V.""Status"", + N.""Timestamp"" AS ""NodeTimestamp"", V.""Timestamp"" AS ""VersionTimestamp"" +FROM updated u + LEFT OUTER JOIN ""Versions"" V ON V.""VersionId"" = u.""VersionId"" + LEFT OUTER 
JOIN ""Nodes"" N ON N.""NodeId"" = u.""NodeId"" +ORDER BY u.""IndexingActivityId"" +"; + #endregion + + #region LoadExecutableAndFinishedIndexingActivitiesScript + // Same as above + second result set with finished activity IDs + // Base class params: @Top, @TimeLimit, @WaitingIds (comma-separated string) + protected override string LoadExecutableAndFinishedIndexingActivitiesScript => + LoadExecutableIndexingActivitiesScript + @" +;SELECT ""IndexingActivityId"" FROM ""IndexingActivities"" +WHERE ""RunningState"" = 'Done' AND ""IndexingActivityId"" = ANY(string_to_array(@WaitingIds, ',')::int[]) +"; + #endregion + + #region RegisterIndexingActivityScript + // Base class params: @ActivityType, @CreationDate, @RunningState, @LockTime, + // @NodeId, @VersionId, @Path, @VersionTimestamp, @Extension + // Return: ExecuteScalar -> IndexingActivityId + protected override string RegisterIndexingActivityScript => @"-- PgSqlDataProvider.RegisterIndexingActivity +INSERT INTO ""IndexingActivities"" + (""ActivityType"", ""CreationDate"", ""RunningState"", ""LockTime"", + ""NodeId"", ""VersionId"", ""Path"", ""VersionTimestamp"", ""Extension"") +VALUES (@ActivityType, @CreationDate, @RunningState, @LockTime, + @NodeId, @VersionId, @Path, @VersionTimestamp, @Extension) +RETURNING ""IndexingActivityId"" +"; + #endregion + + #region UpdateIndexingActivityRunningStateScript + protected override string UpdateIndexingActivityRunningStateScript => @"-- PgSqlDataProvider.UpdateIndexingActivityRunningState +UPDATE ""IndexingActivities"" SET ""RunningState"" = @RunningState, ""LockTime"" = NOW() AT TIME ZONE 'UTC' +WHERE ""IndexingActivityId"" = @IndexingActivityId +"; + #endregion + + #region RefreshIndexingActivityLockTimeScript + // Base class params: @Ids (comma-separated string), @LockTime + protected override string RefreshIndexingActivityLockTimeScript => @"-- PgSqlDataProvider.RefreshIndexingActivityLockTime +UPDATE ""IndexingActivities"" SET ""LockTime"" = @LockTime +WHERE 
""IndexingActivityId"" = ANY(string_to_array(@Ids, ',')::int[]) +"; + #endregion + + #region DeleteFinishedIndexingActivitiesScript + // Base class params: @Minutes (int) + protected override string DeleteFinishedIndexingActivitiesScript => @"-- PgSqlDataProvider.DeleteFinishedIndexingActivities +DELETE FROM ""IndexingActivities"" +WHERE ""RunningState"" = 'Done' AND (""LockTime"" < NOW() AT TIME ZONE 'UTC' - (@Minutes || ' minutes')::INTERVAL OR ""LockTime"" IS NULL) +"; + #endregion + + #region DeleteAllIndexingActivitiesScript + protected override string DeleteAllIndexingActivitiesScript => @"-- PgSqlDataProvider.DeleteAllIndexingActivities +DELETE FROM ""IndexingActivities"" +"; + #endregion + + // =============================================================================================== Schema + + #region LoadSchemaScript + // Base class expects 4 result sets: + // 1. SchemaModification: Timestamp (bytes -> long) + // 2. PropertyTypes: PropertyTypeId, Name, DataType, Mapping, IsContentListProperty + // 3. NodeTypes: NodeTypeId, ParentId, Name, ClassName, Properties + // 4. 
ContentListTypes: ContentListTypeId, Name, Properties + protected override string LoadSchemaScript => @"-- PgSqlDataProvider.LoadSchema +SELECT ""Timestamp"" FROM ""SchemaModification""; +SELECT ""PropertyTypeId"", ""Name"", ""DataType"", ""Mapping"", ""IsContentListProperty"" FROM ""PropertyTypes""; +SELECT ""NodeTypeId"", ""ParentId"", ""Name"", ""ClassName"", ""Properties"" FROM ""NodeTypes""; +SELECT ""ContentListTypeId"", ""Name"", ""Properties"" FROM ""ContentListTypes""; +"; + #endregion + + #region StartSchemaUpdateScript + protected override string StartSchemaUpdateScript => @"-- PgSqlDataProvider.StartSchemaUpdate +WITH ins AS ( + INSERT INTO ""SchemaModification"" (""ModificationDate"", ""LockToken"") + SELECT NOW() AT TIME ZONE 'UTC', @LockToken + WHERE NOT EXISTS (SELECT 1 FROM ""SchemaModification"") + RETURNING 0 AS r +), upd AS ( + UPDATE ""SchemaModification"" + SET ""LockToken"" = @LockToken, ""ModificationDate"" = NOW() AT TIME ZONE 'UTC' + WHERE ""Timestamp"" = @Timestamp + AND (""LockToken"" IS NULL OR ""LockToken"" = @LockToken) + AND NOT EXISTS (SELECT 1 FROM ins) + RETURNING 0 AS r +) +SELECT CASE + WHEN EXISTS (SELECT 1 FROM ins) THEN 0 + WHEN EXISTS (SELECT 1 FROM upd) THEN 0 + WHEN EXISTS (SELECT 1 FROM ""SchemaModification"" + WHERE ""LockToken"" IS NULL OR ""LockToken"" = @LockToken) THEN -1 + ELSE -2 +END AS ""Result""; +"; + #endregion + + #region FinishSchemaUpdateScript + protected override string FinishSchemaUpdateScript => @"-- PgSqlDataProvider.FinishSchemaUpdate +UPDATE ""SchemaModification"" SET ""LockToken"" = NULL, ""ModificationDate"" = NOW() AT TIME ZONE 'UTC' +WHERE ""LockToken"" = @LockToken +RETURNING ""Timestamp"" +"; + #endregion + + // =============================================================================================== Logging + + #region WriteAuditEventScript + protected override string WriteAuditEventScript => @"-- PgSqlDataProvider.WriteAuditEvent +INSERT INTO ""LogEntries"" + (""EventId"", ""Category"", 
""Priority"", ""Severity"", ""Title"", + ""ContentId"", ""ContentPath"", ""UserName"", ""LogDate"", + ""MachineName"", ""AppDomainName"", ""ProcessID"", ""ProcessName"", + ""ThreadName"", ""Win32ThreadId"", ""Message"", ""FormattedMessage"") +VALUES + (@EventId, @Category, @Priority, @Severity, @Title, + @ContentId, @ContentPath, @UserName, @LogDate, + @MachineName, @AppDomainName, @ProcessID, @ProcessName, + @ThreadName, @Win32ThreadId, @Message, @FormattedMessage) +RETURNING ""LogId"" +"; + #endregion + + #region LoadLastAuditEventsScript + protected override string LoadLastAuditEventsScript => @"-- PgSqlDataProvider.LoadLastAuditEvents +SELECT ""LogId"", ""EventId"", ""Category"", ""Priority"", ""Severity"", ""Title"", + ""ContentId"", ""ContentPath"", ""UserName"", ""LogDate"", + ""MachineName"", ""AppDomainName"", ""ProcessID"", ""ProcessName"", + ""ThreadName"", ""Win32ThreadId"", ""Message"", ""FormattedMessage"" +FROM ""LogEntries"" +ORDER BY ""LogId"" DESC LIMIT @Count +"; + #endregion + + // =============================================================================================== Tools + + #region GetNameOfLastNodeWithNameBaseScript + protected override string GetNameOfLastNodeWithNameBaseScript => @"-- PgSqlDataProvider.GetNameOfLastNodeWithNameBase +SELECT ""Name"" FROM ""Nodes"" +WHERE ""ParentNodeId"" = @ParentId AND ( + ""Name"" LIKE @NameEscaped || '(%)' || @Extension +) +ORDER BY LENGTH(""Name"") DESC, ""Name"" DESC +LIMIT 1 +"; + #endregion + + #region GetTreeSizeScript + protected override string GetTreeSizeScript => @"-- PgSqlDataProvider.GetTreeSize +SELECT SUM(F.""Size"") FROM ""Files"" F + JOIN ""BinaryProperties"" B ON F.""FileId"" = B.""FileId"" + JOIN ""Versions"" V ON B.""VersionId"" = V.""VersionId"" + JOIN ""Nodes"" N ON V.""NodeId"" = N.""NodeId"" +WHERE (N.""Path"" = @NodePath OR N.""Path"" LIKE @NodePath || '/%') + AND F.""Staging"" IS NULL +"; + #endregion + + #region GetNodeCountScript / GetNodeCountInSubtreeScript + 
protected override string GetNodeCountScript => @"-- PgSqlDataProvider.GetNodeCount +SELECT COUNT(*) FROM ""Nodes"" +"; + protected override string GetNodeCountInSubtreeScript => @"-- PgSqlDataProvider.GetNodeCountInSubtree +SELECT COUNT(*) FROM ""Nodes"" WHERE ""Path"" = @Path OR ""Path"" LIKE @Path || '/%' +"; + #endregion + + #region GetVersionCountScript / GetVersionCountInSubtreeScript + protected override string GetVersionCountScript => @"-- PgSqlDataProvider.GetVersionCount +SELECT COUNT(*) FROM ""Versions"" +"; + protected override string GetVersionCountInSubtreeScript => @"-- PgSqlDataProvider.GetVersionCountInSubtree +SELECT COUNT(*) FROM ""Versions"" V JOIN ""Nodes"" N ON V.""NodeId"" = N.""NodeId"" +WHERE N.""Path"" = @Path OR N.""Path"" LIKE @Path || '/%' +"; + #endregion + + // =============================================================================================== Entity Tree / Database Usage + + #region LoadEntityTreeScript + protected override string LoadEntityTreeScript => @"-- PgSqlDataProvider.LoadEntityTree +SELECT ""NodeId"", ""ParentNodeId"", ""OwnerId"" FROM ""Nodes"" +ORDER BY ""Path"" +"; + #endregion + + #region LoadDatabaseUsageScript + protected override string LoadDatabaseUsageScript => @"-- PgSqlDataProvider.LoadDatabaseUsage +SELECT N.""NodeId"", V.""VersionId"", N.""ParentNodeId"", N.""NodeTypeId"", + V.""MajorNumber"", V.""MinorNumber"", V.""Status"", + CASE V.""VersionId"" WHEN N.""LastMajorVersionId"" THEN 1 ELSE 0 END AS ""LastPub"", + CASE V.""VersionId"" WHEN N.""LastMinorVersionId"" THEN 1 ELSE 0 END AS ""LastWork"", + N.""OwnerId"", + COALESCE(octet_length(""DynamicProperties""::TEXT), 0) AS ""DynamicPropertiesSize"", + COALESCE(octet_length(""ContentListProperties""::TEXT), 0) AS ""ContentListPropertiesSize"", + COALESCE(octet_length(""ChangedData""::TEXT), 0) AS ""ChangedDataSize"", + COALESCE(octet_length(""IndexDocument""::TEXT), 0) AS ""IndexSize"" +FROM ""Nodes"" N + JOIN ""Versions"" V ON V.""NodeId"" = 
N.""NodeId""; + +SELECT ""VersionId"", COALESCE(octet_length(""Value""), 0) AS ""Size"" FROM ""LongTextProperties""; + +SELECT ""VersionId"", ""FileId"" FROM ""BinaryProperties""; + +SELECT ""FileId"", ""Size"", 0 AS ""StreamSize"" FROM ""Files""; + +SELECT COUNT(1)::INT AS ""Rows"", + COALESCE(SUM(pg_column_size(L.*))::BIGINT, 0) AS ""Metadata"", + COALESCE(SUM(COALESCE(octet_length(""FormattedMessage""), 0))::BIGINT, 0) AS ""Text"" +FROM ""LogEntries"" L; +"; + #endregion + + // =============================================================================================== AppModel + // GetAppModelScript is defined in PgSqlDataProvider.cs + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlDatabaseInstaller.cs b/src/ContentRepository.PostgreSql/Data/PgSqlDatabaseInstaller.cs new file mode 100644 index 000000000..eec4946b4 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlDatabaseInstaller.cs @@ -0,0 +1,256 @@ +using System; +using System.Data; +using System.Runtime.Serialization; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using Npgsql; +using SenseNet.Tools.Configuration; + +namespace SenseNet.Storage.Data.PgSqlClient +{ + [Serializable] + public class PgDbCreationException : Exception + { + public PgDbCreationException() { } + public PgDbCreationException(string message) : base(message) { } + public PgDbCreationException(string message, Exception inner) : base(message, inner) { } + protected PgDbCreationException(SerializationInfo info, StreamingContext context) : base(info, context) { } + } + + /// + /// Options for configuring PostgreSQL database installation. + /// + [OptionsClass(sectionName: "sensenet:install:postgres")] + public class PgSqlDatabaseInstallationOptions + { + /// + /// Allows the application to install the database schema and initial data on first run. + /// + public bool EnableFirstInstallDB { get; set; } = false; + /// + /// Database server host name. 
+ /// + public string Server { get; set; } + /// + /// Database server port. + /// + public int Port { get; set; } = 5432; + /// + /// Database name. + /// + public string DatabaseName { get; set; } + /// + /// The user used for creating the database (superuser or createdb role). + /// + public string DbCreatorUserName { get; set; } + /// + /// The password of the user used for creating the database. + /// + public string DbCreatorPassword { get; set; } + /// + /// The user who will be the owner of the database. + /// + public string DbOwnerUserName { get; set; } + /// + /// The password of the user who will be the owner of the database. + /// + public string DbOwnerPassword { get; set; } + } + + public class PgSqlDatabaseInstaller + { + private readonly ILogger _logger; + private readonly PgSqlDatabaseInstallationOptions _options; + + public PgSqlDatabaseInstaller(IOptions options, ILogger logger) + { + _options = options.Value; + _logger = logger; + } + + public async System.Threading.Tasks.Task InstallAsync() + { + ValidateParameters(_options); + var targetConnectionString = GetConnectionString(_options); + var maintenanceConnectionString = GetMaintenanceConnectionString(_options); + var isIntegratedCustomer = string.IsNullOrEmpty(_options.DbOwnerUserName); + + if (!isIntegratedCustomer) + await EnsureRoleAsync(_options.DbOwnerUserName, _options.DbOwnerPassword, maintenanceConnectionString) + .ConfigureAwait(false); + + await EnsureDatabaseAsync(_options.DatabaseName, _options.DbOwnerUserName, maintenanceConnectionString) + .ConfigureAwait(false); + + // Ensure citext extension is available in the new database + await EnsureCitextExtensionAsync(targetConnectionString).ConfigureAwait(false); + } + + public void ValidateParameters(PgSqlDatabaseInstallationOptions options) + { + if (string.IsNullOrEmpty(options.DatabaseName)) + throw new ArgumentException("DatabaseName cannot be null or empty."); + } + + public string 
GetConnectionString(PgSqlDatabaseInstallationOptions options)
        {
            var builder = new NpgsqlConnectionStringBuilder
            {
                Host = string.IsNullOrEmpty(options.Server) ? "localhost" : options.Server,
                Port = options.Port > 0 ? options.Port : 5432,
                Database = options.DatabaseName
            };

            if (!string.IsNullOrEmpty(options.DbCreatorUserName))
            {
                builder.Username = options.DbCreatorUserName;
                builder.Password = options.DbCreatorPassword;
            }

            return builder.ConnectionString;
        }

        /// <summary>
        /// Builds a connection string to the "postgres" maintenance database. Server-level
        /// operations (role and database creation) must run there, not on the target database.
        /// </summary>
        public string GetMaintenanceConnectionString(PgSqlDatabaseInstallationOptions options)
        {
            var builder = new NpgsqlConnectionStringBuilder
            {
                Host = string.IsNullOrEmpty(options.Server) ? "localhost" : options.Server,
                Port = options.Port > 0 ? options.Port : 5432,
                Database = "postgres" // Connect to maintenance database
            };

            if (!string.IsNullOrEmpty(options.DbCreatorUserName))
            {
                builder.Username = options.DbCreatorUserName;
                builder.Password = options.DbCreatorPassword;
            }

            return builder.ConnectionString;
        }

        /// <summary>Creates the db-owner LOGIN role if it does not exist yet.</summary>
        private async System.Threading.Tasks.Task EnsureRoleAsync(string userName, string password, string connectionString)
        {
            _logger.LogTrace($"Ensure role: {userName}");

            var roleExists = await QueryRoleExistsAsync(userName, connectionString).ConfigureAwait(false);
            if (!roleExists)
                await CreateRoleAsync(userName, password, connectionString).ConfigureAwait(false);
        }

        /// <summary>Returns true if a role with the given name exists on the server.</summary>
        private async Task<bool> QueryRoleExistsAsync(string userName, string connectionString)
        {
            // SECURITY: ExecuteSqlQueryAsync has no parameter support, so escape the
            // embedded string literal ('' doubling) to prevent SQL injection through
            // the configured user name.
            var sql = $"SELECT 1 FROM pg_roles WHERE rolname = '{userName.Replace("'", "''")}'";
            var result = false;
            await ExecuteSqlQueryAsync(sql, connectionString, reader =>
            {
                result = true;
                return false; // first row is enough, stop reading
            });
            return result;
        }

        public async System.Threading.Tasks.Task CreateRoleAsync(string userName, string password, string connectionString)
        {
            try
            {
                // In PostgreSQL, a LOGIN role is equivalent to a SQL Server LOGIN.
                // SECURITY: CREATE ROLE is DDL and cannot be parameterized, so escape
                // the quoted identifier ("" doubling) and the password literal ('' doubling)
                // to prevent SQL injection through configuration values.
                var sql = $"CREATE ROLE \"{userName.Replace("\"", "\"\"")}\" WITH LOGIN PASSWORD '{password.Replace("'", "''")}'";
await ExecuteSqlCommandAsync(sql, connectionString);
            }
            catch (PostgresException e)
            {
                if (e.SqlState == "42710") // duplicate_object: role already exists
                    return;
                throw new PgDbCreationException($"Cannot create role on the server. {e.Message}", e);
            }
        }

        /// <summary>Creates the target database if it does not exist yet.</summary>
        private async System.Threading.Tasks.Task EnsureDatabaseAsync(string databaseName, string ownerUserName, string connectionString)
        {
            _logger.LogTrace($"Querying database: {databaseName}");
            var isExist = await QueryDatabaseExistsAsync(databaseName, connectionString).ConfigureAwait(false);
            if (!isExist)
                await CreateDatabaseAsync(databaseName, ownerUserName, connectionString).ConfigureAwait(false);
        }

        /// <summary>Returns true if a database with the given name exists on the server.</summary>
        private async Task<bool> QueryDatabaseExistsAsync(string databaseName, string connectionString)
        {
            var result = false;
            // SECURITY: ExecuteSqlQueryAsync has no parameter support, so escape the
            // embedded string literal ('' doubling) to prevent SQL injection through
            // the configured database name.
            var sql = $"SELECT 1 FROM pg_database WHERE datname = '{databaseName.Replace("'", "''")}'";
            await ExecuteSqlQueryAsync(sql, connectionString, reader =>
            {
                result = true;
                return false; // first row is enough, stop reading
            });
            return result;
        }

        public async System.Threading.Tasks.Task CreateDatabaseAsync(string databaseName, string ownerUserName, string connectionString)
        {
            // SECURITY: CREATE DATABASE is DDL and cannot be parameterized; escape the
            // quoted identifiers ("" doubling) to prevent SQL injection through
            // configuration values.
            var ownerClause = string.IsNullOrEmpty(ownerUserName)
                ? ""
                : $" OWNER \"{ownerUserName.Replace("\"", "\"\"")}\"";
            var sql = $@"CREATE DATABASE ""{databaseName.Replace("\"", "\"\"")}""{ownerClause}
    ENCODING 'UTF8'
    LC_COLLATE 'en_US.UTF-8'
    LC_CTYPE 'en_US.UTF-8'
    TEMPLATE template0";

            _logger.LogTrace($"Creating database: {databaseName}");

            try
            {
                await ExecuteSqlCommandAsync(sql, connectionString);
            }
            catch (PostgresException e)
            {
                if (e.SqlState == "42P04") // duplicate_database: database already exists
                    return;
                // The en_US.UTF-8 locale may be unavailable on some servers (e.g. Alpine
                // images); retry with a plain CREATE DATABASE. NOTE(review): the original
                // exception 'e' is intentionally discarded when the fallback succeeds.
                try
                {
                    var simpleSql = $@"CREATE DATABASE ""{databaseName.Replace("\"", "\"\"")}""{ownerClause}";
                    await ExecuteSqlCommandAsync(simpleSql, connectionString);
                }
                catch (PostgresException e2)
                {
                    if (e2.SqlState == "42P04")
                        return;
                    throw new PgDbCreationException($"Cannot create database. 
{e2.Message}", e2); + } + } + } + + private async System.Threading.Tasks.Task EnsureCitextExtensionAsync(string connectionString) + { + try + { + await ExecuteSqlCommandAsync("CREATE EXTENSION IF NOT EXISTS citext", connectionString); + } + catch (PostgresException e) + { + _logger.LogWarning($"Could not create citext extension: {e.Message}"); + } + } + + /* =================================================================================== */ + + private async System.Threading.Tasks.Task ExecuteSqlCommandAsync(string sql, string connectionString) + { + await using var cn = new NpgsqlConnection(connectionString); + await using var cmd = new NpgsqlCommand(sql, cn); + cmd.CommandType = CommandType.Text; + await cn.OpenAsync().ConfigureAwait(false); + await cmd.ExecuteNonQueryAsync().ConfigureAwait(false); + } + + private async System.Threading.Tasks.Task ExecuteSqlQueryAsync(string sql, string connectionString, Func processRow) + { + await using var cn = new NpgsqlConnection(connectionString); + await using var cmd = new NpgsqlCommand(sql, cn); + cmd.CommandType = CommandType.Text; + await cn.OpenAsync().ConfigureAwait(false); + await using var reader = await cmd.ExecuteReaderAsync().ConfigureAwait(false); + while (await reader.ReadAsync().ConfigureAwait(false) && processRow(reader)) ; + } + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlExclusiveLockDataProvider.cs b/src/ContentRepository.PostgreSql/Data/PgSqlExclusiveLockDataProvider.cs new file mode 100644 index 000000000..ec30cc637 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlExclusiveLockDataProvider.cs @@ -0,0 +1,151 @@ +using System; +using System.Data; +using System.Threading; +using System.Threading.Tasks; + +using Microsoft.Extensions.Options; +using SenseNet.Configuration; +using SenseNet.Diagnostics; +using SenseNet.Tools; + +// ReSharper disable once CheckNamespace +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient +{ + public class 
PgSqlExclusiveLockDataProvider : IExclusiveLockDataProvider + { + private readonly IRetrier _retrier; + private DataOptions DataOptions { get; } + private ConnectionStringOptions ConnectionStrings { get; } + + public PgSqlExclusiveLockDataProvider(IOptions dataOptions, + IOptions connectionOptions, IRetrier retrier) + { + _retrier = retrier; + DataOptions = dataOptions?.Value ?? new DataOptions(); + ConnectionStrings = connectionOptions?.Value ?? new ConnectionStringOptions(); + } + + public async Task AcquireAsync(string key, string operationId, + DateTime timeLimit, CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlExclusiveLockDataProvider: " + + "Acquire(key: {0}, operationId: {1})", key, operationId); + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var result = await ctx.ExecuteScalarAsync(AcquireScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@Key", DbType.String, 450, key), + ctx.CreateParameter("@OperationId", DbType.String, 450, operationId), + ctx.CreateParameter("@TimeLimit", DbType.DateTime2, timeLimit), + ctx.CreateParameter("@Now", DbType.DateTime2, DateTime.UtcNow), + }); + }).ConfigureAwait(false); + op.Successful = true; + return result != null && Convert.ToBoolean(result); + } + + public async Task IsLockedAsync(string key, string operationId, + CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var result = await ctx.ExecuteScalarAsync(IsLockedScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@Key", DbType.String, 450, key), + ctx.CreateParameter("@OperationId", DbType.String, 450, operationId), + ctx.CreateParameter("@Now", DbType.DateTime2, DateTime.UtcNow), + }); + }).ConfigureAwait(false); + return result != null && Convert.ToBoolean(result); + } + + public async 
System.Threading.Tasks.Task RefreshAsync(string key, string operationId, DateTime newTimeLimit, + CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + await ctx.ExecuteNonQueryAsync(RefreshScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@Key", DbType.String, 450, key), + ctx.CreateParameter("@OperationId", DbType.String, 450, operationId), + ctx.CreateParameter("@TimeLimit", DbType.DateTime2, newTimeLimit), + }); + }).ConfigureAwait(false); + } + + public async System.Threading.Tasks.Task ReleaseAsync(string key, string operationId, + CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + await ctx.ExecuteNonQueryAsync(ReleaseScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@Key", DbType.String, 450, key), + ctx.CreateParameter("@OperationId", DbType.String, 450, operationId), + }); + }).ConfigureAwait(false); + } + + public async System.Threading.Tasks.Task ReleaseAllAsync(CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + await ctx.ExecuteNonQueryAsync(@"DELETE FROM ""ExclusiveLocks""").ConfigureAwait(false); + } + + public async Task IsFeatureAvailable(CancellationToken cancellationToken) + { + try + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + await ctx.ExecuteScalarAsync(@"SELECT 1 FROM ""ExclusiveLocks"" LIMIT 1").ConfigureAwait(false); + return true; + } + catch + { + return false; + } + } + + // =============================================================================================== Scripts + + private const string AcquireScript = @"-- PgSqlExclusiveLockDataProvider.Acquire +DELETE FROM ""ExclusiveLocks"" WHERE 
""TimeLimit"" < @Now; +INSERT INTO ""ExclusiveLocks"" (""Name"", ""OperationId"", ""TimeLimit"") +VALUES (@Key, @OperationId, @TimeLimit) +ON CONFLICT (""Name"") DO NOTHING +RETURNING TRUE +"; + + private const string IsLockedScript = @"-- PgSqlExclusiveLockDataProvider.IsLocked +SELECT EXISTS( + SELECT 1 FROM ""ExclusiveLocks"" + WHERE ""Name"" = @Key AND ""OperationId"" != @OperationId AND ""TimeLimit"" > @Now +) +"; + + private const string RefreshScript = @"-- PgSqlExclusiveLockDataProvider.Refresh +UPDATE ""ExclusiveLocks"" SET ""TimeLimit"" = @TimeLimit +WHERE ""Name"" = @Key AND ""OperationId"" = @OperationId +"; + + private const string ReleaseScript = @"-- PgSqlExclusiveLockDataProvider.Release +DELETE FROM ""ExclusiveLocks"" +WHERE ""Name"" = @Key AND ""OperationId"" = @OperationId +"; + + // =============================================================================================== Installation + + public static readonly string CreationScript = @"-- PgSqlExclusiveLockDataProvider.CreateTable +CREATE TABLE IF NOT EXISTS ""ExclusiveLocks"" ( + ""Id"" SERIAL PRIMARY KEY, + ""Name"" VARCHAR(450) NOT NULL UNIQUE, + ""OperationId"" VARCHAR(450) NOT NULL, + ""TimeLimit"" TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +"; + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlPackagingDataProvider.cs b/src/ContentRepository.PostgreSql/Data/PgSqlPackagingDataProvider.cs new file mode 100644 index 000000000..62afa139a --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlPackagingDataProvider.cs @@ -0,0 +1,426 @@ +using System; +using System.Collections.Generic; +using System.Data; +using System.IO; +using System.Linq; +using System.Threading; +using STT=System.Threading.Tasks; +using Newtonsoft.Json; +using SenseNet.Diagnostics; + +// ReSharper disable AccessToDisposedClosure + +// ReSharper disable once CheckNamespace +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient +{ + /// + /// This is a PostgreSQL implementation of the interface. 
+ /// It requires the main data provider to be a . + /// + public class PgSqlPackagingDataProvider : IPackagingDataProvider + { + private readonly RelationalDataProviderBase _mainProvider; + + public PgSqlPackagingDataProvider(DataProvider mainProvider) + { + if (mainProvider == null) + return; + if (!(mainProvider is RelationalDataProviderBase relationalDataProviderBase)) + throw new ArgumentException("The mainProvider need to be RelationalDataProviderBase."); + _mainProvider = relationalDataProviderBase; + } + + #region SQL LoadInstalledComponentsScript + private static readonly string InstalledComponentsScript = $@"-- PgSqlPackagingDataProvider.LoadInstalledComponents +SELECT ""ComponentId"", ""PackageType"", ""ComponentVersion"", ""Description"", ""Manifest"" +FROM ""Packages"" WHERE + (""PackageType"" = '{PackageType.Install}' OR ""PackageType"" = '{PackageType.Patch}') AND + ""ExecutionResult"" = '{ExecutionResult.Successful}' +ORDER BY ""ComponentId"", ""ComponentVersion"", ""ExecutionDate"" +"; + #endregion + public async STT.Task> LoadInstalledComponentsAsync(CancellationToken cancellationToken) + { + if (!(await _mainProvider.IsDatabaseReadyAsync(cancellationToken))) + return new ComponentInfo[0]; + + var components = new Dictionary(); + var descriptions = new Dictionary(); + + using var op = SnTrace.Database.StartOperation("PgSqlPackagingDataProvider: LoadInstalledComponents()"); + using var ctx = _mainProvider.CreateDataContext(cancellationToken); + await ctx.ExecuteReaderAsync(InstalledComponentsScript, + async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + while (await reader.ReadAsync(cancel).ConfigureAwait(false)) + { + cancel.ThrowIfCancellationRequested(); + + var component = new ComponentInfo + { + ComponentId = reader.GetSafeString(reader.GetOrdinal("ComponentId")), + Version = DecodePackageVersion( + reader.GetSafeString(reader.GetOrdinal("ComponentVersion"))), + Description = 
reader.GetSafeString(reader.GetOrdinal("Description")), + Manifest = reader.GetSafeString(reader.GetOrdinal("Manifest")), + ExecutionResult = ExecutionResult.Successful + }; + + components[component.ComponentId] = component; + if (reader.GetSafeString(reader.GetOrdinal("PackageType")) + == nameof(PackageType.Install)) + descriptions[component.ComponentId] = component.Description; + } + + return true; + }).ConfigureAwait(false); + + foreach (var item in descriptions) + components[item.Key].Description = item.Value; + op.Successful = true; + + return components.Values.ToArray(); + } + + #region SQL LoadIncompleteComponentsScript + private static readonly string IncompleteComponentsScript = $@"-- PgSqlPackagingDataProvider.LoadIncompleteComponents +SELECT ""ComponentId"", ""PackageType"", ""ComponentVersion"", ""Description"", ""Manifest"", ""ExecutionResult"" +FROM ""Packages"" WHERE + (""PackageType"" = '{PackageType.Install}' OR ""PackageType"" = '{PackageType.Patch}') AND + ""ExecutionResult"" != '{ExecutionResult.Successful}' +ORDER BY ""ComponentId"", ""ComponentVersion"", ""ExecutionDate"" +"; + #endregion + public async STT.Task> LoadIncompleteComponentsAsync(CancellationToken cancellationToken) + { + if (!(await _mainProvider.IsDatabaseReadyAsync(cancellationToken))) + return new ComponentInfo[0]; + + var components = new Dictionary(); + var descriptions = new Dictionary(); + + using var op = SnTrace.Database.StartOperation("PgSqlPackagingDataProvider: LoadIncompleteComponents()"); + using var ctx = _mainProvider.CreateDataContext(cancellationToken); + await ctx.ExecuteReaderAsync(IncompleteComponentsScript, + async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + while (await reader.ReadAsync(cancel).ConfigureAwait(false)) + { + cancel.ThrowIfCancellationRequested(); + + var src = reader.GetSafeString(reader.GetOrdinal("ExecutionResult")); + var executionResult = src == null + ? 
ExecutionResult.Unfinished + : (ExecutionResult) Enum.Parse(typeof(ExecutionResult), src); + + var component = new ComponentInfo + { + ComponentId = reader.GetSafeString(reader.GetOrdinal("ComponentId")), + Version = DecodePackageVersion( + reader.GetSafeString(reader.GetOrdinal("ComponentVersion"))), + Description = reader.GetSafeString(reader.GetOrdinal("Description")), + Manifest = reader.GetSafeString(reader.GetOrdinal("Manifest")), + ExecutionResult = executionResult + }; + + components[component.ComponentId] = component; + if (reader.GetSafeString(reader.GetOrdinal("PackageType")) + == nameof(PackageType.Install)) + descriptions[component.ComponentId] = component.Description; + } + + return true; + }).ConfigureAwait(false); + + foreach (var item in descriptions) + components[item.Key].Description = item.Value; + op.Successful = true; + + return components.Values.ToArray(); + } + + public async STT.Task> LoadInstalledPackagesAsync(CancellationToken cancellationToken) + { + var packages = new List(); + + using var op = SnTrace.Database.StartOperation("PgSqlPackagingDataProvider: LoadInstalledPackages()"); + using var ctx = _mainProvider.CreateDataContext(cancellationToken); + await ctx.ExecuteReaderAsync(@"SELECT * FROM ""Packages""", + async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + while (await reader.ReadAsync(cancel).ConfigureAwait(false)) + { + cancel.ThrowIfCancellationRequested(); + packages.Add(new Package + { + Id = reader.GetInt32(reader.GetOrdinal("Id")), + Description = reader.GetSafeString(reader.GetOrdinal("Description")), + ComponentId = reader.GetSafeString(reader.GetOrdinal("ComponentId")), + PackageType = (PackageType) Enum.Parse(typeof(PackageType), + reader.GetString(reader.GetOrdinal("PackageType"))), + ReleaseDate = reader.GetDateTimeUtc(reader.GetOrdinal("ReleaseDate")), + ExecutionDate = reader.GetDateTimeUtc(reader.GetOrdinal("ExecutionDate")), + ExecutionResult = (ExecutionResult) 
Enum.Parse(typeof(ExecutionResult), + reader.GetString(reader.GetOrdinal("ExecutionResult"))), + ExecutionError = + DeserializeExecutionError( + reader.GetSafeString(reader.GetOrdinal("ExecutionError"))), + ComponentVersion = + DecodePackageVersion(reader.GetSafeString(reader.GetOrdinal("ComponentVersion"))), + Manifest = reader.GetSafeString(reader.GetOrdinal("Manifest")), + }); + } + + return true; + }).ConfigureAwait(false); + op.Successful = true; + + return packages; + } + + #region SQL SavePackageScript + private static readonly string SavePackageScript = @"INSERT INTO ""Packages"" + (""Description"", ""ComponentId"", ""PackageType"", ""ReleaseDate"", ""ExecutionDate"", + ""ExecutionResult"", ""ExecutionError"", ""ComponentVersion"", ""Manifest"") VALUES + (@Description, @ComponentId, @PackageType, @ReleaseDate, @ExecutionDate, + @ExecutionResult, @ExecutionError, @ComponentVersion, @Manifest) +RETURNING ""Id"""; + #endregion + public async STT.Task SavePackageAsync(Package package, CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlPackagingDataProvider: " + + "SavePackage: ComponentId: {0}, ComponentVersion: {1}, ExecutionResult: {2}", + package.ComponentId, package.ComponentVersion, package.ExecutionResult); + + using var ctx = _mainProvider.CreateDataContext(cancellationToken); + var result = await ctx.ExecuteScalarAsync(SavePackageScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@Description", DbType.String, 1000, + (object) package.Description ?? DBNull.Value), + ctx.CreateParameter("@ComponentId", DbType.AnsiString, 50, + (object) package.ComponentId ?? 
DBNull.Value), + ctx.CreateParameter("@PackageType", DbType.AnsiString, 50, package.PackageType.ToString()), + ctx.CreateParameter("@ReleaseDate", DbType.DateTime2, package.ReleaseDate), + ctx.CreateParameter("@ExecutionDate", DbType.DateTime2, package.ExecutionDate), + ctx.CreateParameter("@ExecutionResult", DbType.AnsiString, 50, package.ExecutionResult.ToString()), + ctx.CreateParameter("@ExecutionError", DbType.String, int.MaxValue, + SerializeExecutionError(package.ExecutionError) ?? (object) DBNull.Value), + ctx.CreateParameter("@ComponentVersion", DbType.AnsiString, 50, + package.ComponentVersion == null + ? DBNull.Value + : (object) EncodePackageVersion(package.ComponentVersion)), + ctx.CreateParameter("@Manifest", DbType.String, int.MaxValue, package.Manifest ?? (object) DBNull.Value) + }); + }).ConfigureAwait(false); + + package.Id = Convert.ToInt32(result); + op.Successful = true; + } + + #region SQL UpdatePackageScript + private static readonly string UpdatePackageScript = @"UPDATE ""Packages"" + SET ""ComponentId"" = @ComponentId, + ""Description"" = @Description, + ""PackageType"" = @PackageType, + ""ReleaseDate"" = @ReleaseDate, + ""ExecutionDate"" = @ExecutionDate, + ""ExecutionResult"" = @ExecutionResult, + ""ExecutionError"" = @ExecutionError, + ""ComponentVersion"" = @ComponentVersion +WHERE ""Id"" = @Id +"; + #endregion + public async STT.Task UpdatePackageAsync(Package package, CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlPackagingDataProvider: " + + "UpdatePackage: ComponentId: {0}, ComponentVersion: {1}, ExecutionResult: {2}", + package.ComponentId, package.ComponentVersion, package.ExecutionResult); + + using var ctx = _mainProvider.CreateDataContext(cancellationToken); + await ctx.ExecuteNonQueryAsync(UpdatePackageScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@Id", DbType.Int32, package.Id), + ctx.CreateParameter("@Description", DbType.String, 1000, + 
(object) package.Description ?? DBNull.Value), + ctx.CreateParameter("@ComponentId", DbType.AnsiString, 50, + (object) package.ComponentId ?? DBNull.Value), + ctx.CreateParameter("@PackageType", DbType.AnsiString, 50, package.PackageType.ToString()), + ctx.CreateParameter("@ReleaseDate", DbType.DateTime2, package.ReleaseDate), + ctx.CreateParameter("@ExecutionDate", DbType.DateTime2, package.ExecutionDate), + ctx.CreateParameter("@ExecutionResult", DbType.AnsiString, 50, + package.ExecutionResult.ToString()), + ctx.CreateParameter("@ExecutionError", DbType.String, int.MaxValue, + SerializeExecutionError(package.ExecutionError) ?? (object) DBNull.Value), + ctx.CreateParameter("@ComponentVersion", DbType.AnsiString, 50, + package.ComponentVersion == null + ? DBNull.Value + : (object) EncodePackageVersion(package.ComponentVersion)) + }); + }).ConfigureAwait(false); + op.Successful = true; + } + + #region SQL PackageExistenceScript + private static readonly string PackageExistenceScript = @"SELECT COUNT(0) FROM ""Packages"" +WHERE ""ComponentId"" = @ComponentId AND ""PackageType"" = @PackageType AND ""ComponentVersion"" = @Version +"; + #endregion + public async STT.Task IsPackageExistAsync(string componentId, PackageType packageType, Version version + , CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlPackagingDataProvider: " + + "IsPackageExist(componentId: {0}, packageType: {1}, version: {2})", componentId, packageType, version); + using var ctx = _mainProvider.CreateDataContext(cancellationToken); + var result = await ctx.ExecuteScalarAsync(PackageExistenceScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@ComponentId", DbType.AnsiString, 50, (object)componentId ?? 
DBNull.Value), + ctx.CreateParameter("@PackageType", DbType.AnsiString, 50, packageType.ToString()), + ctx.CreateParameter("@Version", DbType.AnsiString, 50, EncodePackageVersion(version)) + }); + }).ConfigureAwait(false); + var count = Convert.ToInt32(result); + op.Successful = true; + + return count > 0; + } + + public async STT.Task DeletePackageAsync(Package package, CancellationToken cancellationToken) + { + if (package.Id < 1) + throw new ApplicationException("Cannot delete unsaved package"); + + using var op = SnTrace.Database.StartOperation("PgSqlPackagingDataProvider: " + + "DeletePackage: Id: {0}", package.Id); + using var ctx = _mainProvider.CreateDataContext(cancellationToken); + await ctx.ExecuteNonQueryAsync(@"DELETE FROM ""Packages"" WHERE ""Id"" = @Id", + cmd => { cmd.Parameters.Add(ctx.CreateParameter("@Id", DbType.Int32, package.Id)); }).ConfigureAwait(false); + op.Successful = true; + } + + public async STT.Task DeleteAllPackagesAsync(CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlPackagingDataProvider: DeleteAllPackages()"); + using var ctx = _mainProvider.CreateDataContext(cancellationToken); + await ctx.ExecuteNonQueryAsync(@"TRUNCATE TABLE ""Packages"" RESTART IDENTITY CASCADE").ConfigureAwait(false); + op.Successful = true; + } + + #region SQL LoadManifestScript + private static readonly string LoadManifestScript = @"SELECT ""Manifest"" FROM ""Packages"" WHERE ""Id"" = @Id"; + #endregion + public async STT.Task LoadManifestAsync(Package package, CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlPackagingDataProvider: " + + "LoadManifest: Id: {0}", package.Id); + + using var ctx = _mainProvider.CreateDataContext(cancellationToken); + var result = await ctx.ExecuteScalarAsync(LoadManifestScript, cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@Id", DbType.Int32, package.Id) + }); + }).ConfigureAwait(false); + package.Manifest = 
(string)(result == DBNull.Value ? null : result); + + op.Successful = true; + } + + /* =============================================================================== Methods for Steps */ + + public Dictionary GetContentPathsWhereTheyAreAllowedChildren(List names) + { + var result = new Dictionary(); + + var whereClausePart = string.Join(Environment.NewLine + " OR" + Environment.NewLine, + names.Select(n => + $" (t.\"Value\" like '{n}' OR t.\"Value\" like '% {n} %' OR t.\"Value\" like '{n} %' OR t.\"Value\" like '% {n}')")); + + var sql = $"-- GetContentPathsWhereTheyAreAllowedChildren: [{string.Join(", ", names)}]" + + Environment.NewLine; + sql += @"SELECT n.""Path""::TEXT, t.""Value"" FROM ""LongTextProperties"" t + JOIN ""PropertyTypes"" p ON p.""PropertyTypeId"" = t.""PropertyTypeId"" + JOIN ""Versions"" v ON t.""VersionId"" = v.""VersionId"" + JOIN ""Nodes"" n ON n.""NodeId"" = v.""NodeId"" +WHERE p.""Name"" = 'AllowedChildTypes' AND ( +" + whereClausePart + @" +) +"; + using var op = SnTrace.Database.StartOperation("PgSqlPackagingDataProvider:" + + " GetContentPathsWhereTheyAreAllowedChildren()"); + + using var ctx = _mainProvider.CreateDataContext(CancellationToken.None); + var _ = ctx.ExecuteReaderAsync(sql, async (reader, cancel) => + { + cancel.ThrowIfCancellationRequested(); + while (await reader.ReadAsync(cancel).ConfigureAwait(false)) + { + cancel.ThrowIfCancellationRequested(); + result.Add(reader.GetString(0), reader.GetString(1)); + } + return STT.Task.FromResult(0); + }).GetAwaiter().GetResult(); + op.Successful = true; + + return result; + } + + /* =============================================================================== TOOLS */ + + private static string EncodePackageVersion(Version v) + { + if (v.Build < 0) + return $"{v.Major:0#########}.{v.Minor:0#########}"; + if (v.Revision < 0) + return $"{v.Major:0#########}.{v.Minor:0#########}.{v.Build:0#########}"; + return 
$"{v.Major:0#########}.{v.Minor:0#########}.{v.Build:0#########}.{v.Revision:0#########}"; + } + private static Version DecodePackageVersion(string s) + { + return s == null ? null : Version.Parse(s); + } + + private string SerializeExecutionError(Exception e) + { + if (e == null) + return null; + + var serializer = new JsonSerializer + { + NullValueHandling = NullValueHandling.Ignore + }; + try + { + using var sw = new StringWriter(); + using (JsonWriter writer = new JsonTextWriter(sw)) + serializer.Serialize(writer, e); + return sw.GetStringBuilder().ToString(); + } + catch (Exception ee) + { + using var sw = new StringWriter(); + using (JsonWriter writer = new JsonTextWriter(sw)) + serializer.Serialize(writer, new Exception("Cannot serialize the execution error: " + ee.Message)); + return sw.GetStringBuilder().ToString(); + } + } + private Exception DeserializeExecutionError(string data) + { + if (data == null) + return null; + + var serializer = new JsonSerializer(); + using var jreader = new JsonTextReader(new StringReader(data)); + return serializer.Deserialize(jreader); + } + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlSchemaInstaller.cs b/src/ContentRepository.PostgreSql/Data/PgSqlSchemaInstaller.cs new file mode 100644 index 000000000..f19a3b854 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlSchemaInstaller.cs @@ -0,0 +1,110 @@ +using System; +using System.Collections.Generic; +using System.Data; +using System.Linq; +using STT=System.Threading.Tasks; +using Npgsql; +using SenseNet.ContentRepository.Storage.DataModel; + +// ReSharper disable once CheckNamespace +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient +{ + internal class PgSqlSchemaInstaller + { + private static readonly byte Yes = 1; + private static readonly byte No = 0; + + private readonly string _connectionString; + + public PgSqlSchemaInstaller(string connectionString) + { + _connectionString = connectionString; + } + + public async STT.Task 
InstallSchemaAsync(RepositorySchemaData schema) + { + await using var connection = new NpgsqlConnection(_connectionString); + await connection.OpenAsync().ConfigureAwait(false); + await using var transaction = await connection.BeginTransactionAsync().ConfigureAwait(false); + + try + { + await UpsertPropertyTypesAsync(schema.PropertyTypes, connection, transaction).ConfigureAwait(false); + await UpsertNodeTypesAsync(schema.NodeTypes, connection, transaction).ConfigureAwait(false); + await UpsertContentListTypesAsync(schema.ContentListTypes, connection, transaction).ConfigureAwait(false); + await transaction.CommitAsync().ConfigureAwait(false); + } + catch + { + await transaction.RollbackAsync().ConfigureAwait(false); + throw; + } + } + + private async STT.Task UpsertPropertyTypesAsync(List propertyTypes, + NpgsqlConnection connection, NpgsqlTransaction transaction) + { + foreach (var pt in propertyTypes) + { + await using var cmd = new NpgsqlCommand( + @"INSERT INTO ""PropertyTypes"" (""PropertyTypeId"", ""Name"", ""DataType"", ""Mapping"", ""IsContentListProperty"") + VALUES (@Id, @Name, @DataType, @Mapping, @IsContentListProperty) + ON CONFLICT (""PropertyTypeId"") DO UPDATE SET + ""Name"" = EXCLUDED.""Name"", + ""DataType"" = EXCLUDED.""DataType"", + ""Mapping"" = EXCLUDED.""Mapping"", + ""IsContentListProperty"" = EXCLUDED.""IsContentListProperty""", + connection, transaction); + cmd.Parameters.AddWithValue("@Id", pt.Id); + cmd.Parameters.AddWithValue("@Name", pt.Name); + cmd.Parameters.AddWithValue("@DataType", pt.DataType.ToString()); + cmd.Parameters.AddWithValue("@Mapping", pt.Mapping); + cmd.Parameters.AddWithValue("@IsContentListProperty", pt.IsContentListProperty ? 
Yes : No); + await cmd.ExecuteNonQueryAsync().ConfigureAwait(false); + } + } + + private async STT.Task UpsertNodeTypesAsync(List nodeTypes, + NpgsqlConnection connection, NpgsqlTransaction transaction) + { + foreach (var nt in nodeTypes) + { + await using var cmd = new NpgsqlCommand( + @"INSERT INTO ""NodeTypes"" (""NodeTypeId"", ""ParentId"", ""Name"", ""ClassName"", ""Properties"") + VALUES (@Id, @ParentId, @Name, @ClassName, @Properties) + ON CONFLICT (""NodeTypeId"") DO UPDATE SET + ""ParentId"" = EXCLUDED.""ParentId"", + ""Name"" = EXCLUDED.""Name"", + ""ClassName"" = EXCLUDED.""ClassName"", + ""Properties"" = EXCLUDED.""Properties""", + connection, transaction); + cmd.Parameters.AddWithValue("@Id", nt.Id); + var parentId = nodeTypes.FirstOrDefault(x => x.Name == nt.ParentName)?.Id; + cmd.Parameters.AddWithValue("@ParentId", (object)parentId ?? DBNull.Value); + cmd.Parameters.AddWithValue("@Name", nt.Name); + cmd.Parameters.AddWithValue("@ClassName", nt.ClassName); + cmd.Parameters.AddWithValue("@Properties", string.Join(" ", nt.Properties)); + await cmd.ExecuteNonQueryAsync().ConfigureAwait(false); + } + } + + private async STT.Task UpsertContentListTypesAsync(List contentListTypes, + NpgsqlConnection connection, NpgsqlTransaction transaction) + { + foreach (var clt in contentListTypes) + { + await using var cmd = new NpgsqlCommand( + @"INSERT INTO ""ContentListTypes"" (""ContentListTypeId"", ""Name"", ""Properties"") + VALUES (@Id, @Name, @Properties) + ON CONFLICT (""ContentListTypeId"") DO UPDATE SET + ""Name"" = EXCLUDED.""Name"", + ""Properties"" = EXCLUDED.""Properties""", + connection, transaction); + cmd.Parameters.AddWithValue("@Id", clt.Id); + cmd.Parameters.AddWithValue("@Name", clt.Name); + cmd.Parameters.AddWithValue("@Properties", string.Join(" ", clt.Properties)); + await cmd.ExecuteNonQueryAsync().ConfigureAwait(false); + } + } + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlSchemaWriter.cs 
b/src/ContentRepository.PostgreSql/Data/PgSqlSchemaWriter.cs new file mode 100644 index 000000000..572ab01af --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlSchemaWriter.cs @@ -0,0 +1,113 @@ +using System; +using System.Linq; +using STT=System.Threading.Tasks; +using Microsoft.Extensions.Options; +using SenseNet.Configuration; +using SenseNet.ContentRepository.Storage.DataModel; +using SenseNet.ContentRepository.Storage.Schema; + +// ReSharper disable once CheckNamespace +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient +{ + public class PgSqlSchemaWriter : SchemaWriter + { + private readonly string _connectionString; + + public PgSqlSchemaWriter(IOptions connectionStrings) + { + _connectionString = connectionStrings.Value.Repository; + } + + public override bool CanWriteDifferences => false; + + public override async STT.Task WriteSchemaAsync(RepositorySchemaData schema) + { + var propertyTypes = schema.PropertyTypes.Where(x => x.Id == 0).ToArray(); + if (propertyTypes.Any()) + { + var lastId = schema.PropertyTypes.Max(x => x.Id); + foreach (var propertyType in propertyTypes) + propertyType.Id = ++lastId; + } + var nodeTypes = schema.NodeTypes.Where(x => x.Id == 0).ToArray(); + if (nodeTypes.Any()) + { + var lastId = schema.NodeTypes.Max(x => x.Id); + foreach (var nodeType in nodeTypes) + nodeType.Id = ++lastId; + } + var contentListTypes = schema.ContentListTypes.Where(x => x.Id == 0).ToArray(); + if (contentListTypes.Any()) + { + var lastId = schema.ContentListTypes.Max(x => x.Id); + foreach (var contentListType in contentListTypes) + contentListType.Id = ++lastId; + } + + var installer = new PgSqlSchemaInstaller(_connectionString); + await installer.InstallSchemaAsync(schema).ConfigureAwait(false); + } + + #region unused methods + public override void Open() + { + throw new NotSupportedException(); + } + + public override void Close() + { + throw new NotSupportedException(); + } + + public override void CreatePropertyType(string 
name, DataType dataType, int mapping, bool isContentListProperty) + { + throw new NotSupportedException(); + } + + public override void DeletePropertyType(PropertyType propertyType) + { + throw new NotSupportedException(); + } + + public override void CreateNodeType(NodeType parent, string name, string className) + { + throw new NotSupportedException(); + } + + public override void ModifyNodeType(NodeType nodeType, NodeType parent, string className) + { + throw new NotSupportedException(); + } + + public override void DeleteNodeType(NodeType nodeType) + { + throw new NotSupportedException(); + } + + public override void CreateContentListType(string name) + { + throw new NotSupportedException(); + } + + public override void DeleteContentListType(ContentListType contentListType) + { + throw new NotSupportedException(); + } + + public override void AddPropertyTypeToPropertySet(PropertyType propertyType, PropertySet owner, bool isDeclared) + { + throw new NotSupportedException(); + } + + public override void RemovePropertyTypeFromPropertySet(PropertyType propertyType, PropertySet owner) + { + throw new NotSupportedException(); + } + + public override void UpdatePropertyTypeDeclarationState(PropertyType propertyType, NodeType owner, bool isDeclared) + { + throw new NotSupportedException(); + } + #endregion + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlSharedLockDataProvider.cs b/src/ContentRepository.PostgreSql/Data/PgSqlSharedLockDataProvider.cs new file mode 100644 index 000000000..dc06dbd18 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Data/PgSqlSharedLockDataProvider.cs @@ -0,0 +1,225 @@ +using System; +using System.Data; +using System.Threading; +using System.Threading.Tasks; + +using Microsoft.Extensions.Options; +using SenseNet.Configuration; +using SenseNet.Diagnostics; +using SenseNet.Tools; + +// ReSharper disable once CheckNamespace +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient +{ + public class 
PgSqlSharedLockDataProvider : ISharedLockDataProvider + { + private readonly IRetrier _retrier; + private DataOptions DataOptions { get; } + private ConnectionStringOptions ConnectionStrings { get; } + + public PgSqlSharedLockDataProvider(IOptions dataOptions, + IOptions connectionOptions, IRetrier retrier) + { + _retrier = retrier; + DataOptions = dataOptions?.Value ?? new DataOptions(); + ConnectionStrings = connectionOptions?.Value ?? new ConnectionStringOptions(); + } + + public TimeSpan SharedLockTimeout { get; } = TimeSpan.FromMinutes(30); + + public async System.Threading.Tasks.Task DeleteAllSharedLocksAsync(CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + await ctx.ExecuteNonQueryAsync(@"TRUNCATE TABLE ""SharedLocks""").ConfigureAwait(false); + } + + public async System.Threading.Tasks.Task CreateSharedLockAsync(int contentId, string @lock, + CancellationToken cancellationToken) + { + using var op = SnTrace.Database.StartOperation("PgSqlSharedLockDataProvider: " + + "CreateSharedLock(contentId: {0})", contentId); + + var timeLimit = DateTime.UtcNow.AddTicks(-SharedLockTimeout.Ticks); + + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + + // Delete expired locks first + await ctx.ExecuteNonQueryAsync( + @"DELETE FROM ""SharedLocks"" WHERE ""CreationDate"" < @TimeLimit", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@TimeLimit", DbType.DateTime2, timeLimit)); + }).ConfigureAwait(false); + + // Insert new lock + await ctx.ExecuteNonQueryAsync( + @"INSERT INTO ""SharedLocks"" (""ContentId"", ""Lock"", ""CreationDate"") +VALUES (@ContentId, @Lock, @Now) +ON CONFLICT (""ContentId"") DO UPDATE SET ""Lock"" = @Lock, ""CreationDate"" = @Now +WHERE ""SharedLocks"".""Lock"" = @Lock", cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@ContentId", DbType.Int32, contentId), + 
ctx.CreateParameter("@Lock", DbType.String, 1000, @lock), + ctx.CreateParameter("@Now", DbType.DateTime2, DateTime.UtcNow), + }); + }).ConfigureAwait(false); + + op.Successful = true; + } + + public async Task ModifySharedLockAsync(int contentId, string @lock, string newLock, + CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + + var timeLimit = DateTime.UtcNow.AddTicks(-SharedLockTimeout.Ticks); + + // Delete expired + await ctx.ExecuteNonQueryAsync( + @"DELETE FROM ""SharedLocks"" WHERE ""CreationDate"" < @TimeLimit", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@TimeLimit", DbType.DateTime2, timeLimit)); + }).ConfigureAwait(false); + + // Get current lock + var existingLock = await ctx.ExecuteScalarAsync( + @"SELECT ""Lock"" FROM ""SharedLocks"" WHERE ""ContentId"" = @ContentId", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@ContentId", DbType.Int32, contentId)); + }).ConfigureAwait(false); + + if (existingLock == null || existingLock == DBNull.Value) + throw new SharedLockNotFoundException("Content is not locked."); + + var currentLock = (string)existingLock; + if (currentLock != @lock) + return currentLock; + + await ctx.ExecuteNonQueryAsync( + @"UPDATE ""SharedLocks"" SET ""Lock"" = @NewLock, ""CreationDate"" = @Now +WHERE ""ContentId"" = @ContentId AND ""Lock"" = @Lock", cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@ContentId", DbType.Int32, contentId), + ctx.CreateParameter("@Lock", DbType.String, 1000, @lock), + ctx.CreateParameter("@NewLock", DbType.String, 1000, newLock), + ctx.CreateParameter("@Now", DbType.DateTime2, DateTime.UtcNow), + }); + }).ConfigureAwait(false); + + return newLock; + } + + public async Task GetSharedLockAsync(int contentId, CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var 
timeLimit = DateTime.UtcNow.AddTicks(-SharedLockTimeout.Ticks); + + var result = await ctx.ExecuteScalarAsync( + @"SELECT ""Lock"" FROM ""SharedLocks"" +WHERE ""ContentId"" = @ContentId AND ""CreationDate"" >= @TimeLimit", cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@ContentId", DbType.Int32, contentId), + ctx.CreateParameter("@TimeLimit", DbType.DateTime2, timeLimit), + }); + }).ConfigureAwait(false); + + return result == null || result == DBNull.Value ? null : (string)result; + } + + public async Task DeleteSharedLockAsync(int contentId, string @lock, + CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var timeLimit = DateTime.UtcNow.AddTicks(-SharedLockTimeout.Ticks); + + // Delete expired + await ctx.ExecuteNonQueryAsync( + @"DELETE FROM ""SharedLocks"" WHERE ""CreationDate"" < @TimeLimit", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@TimeLimit", DbType.DateTime2, timeLimit)); + }).ConfigureAwait(false); + + var existingLock = await ctx.ExecuteScalarAsync( + @"SELECT ""Lock"" FROM ""SharedLocks"" WHERE ""ContentId"" = @ContentId", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@ContentId", DbType.Int32, contentId)); + }).ConfigureAwait(false); + + if (existingLock == null || existingLock == DBNull.Value) + throw new SharedLockNotFoundException("Content is not locked."); + + var currentLock = (string)existingLock; + if (currentLock != @lock) + return currentLock; + + await ctx.ExecuteNonQueryAsync( + @"DELETE FROM ""SharedLocks"" WHERE ""ContentId"" = @ContentId AND ""Lock"" = @Lock", cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@ContentId", DbType.Int32, contentId), + ctx.CreateParameter("@Lock", DbType.String, 1000, @lock), + }); + }).ConfigureAwait(false); + + return @lock; + } + + public async Task RefreshSharedLockAsync(int contentId, string @lock, + CancellationToken cancellationToken) + 
{ + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var timeLimit = DateTime.UtcNow.AddTicks(-SharedLockTimeout.Ticks); + + // Delete expired + await ctx.ExecuteNonQueryAsync( + @"DELETE FROM ""SharedLocks"" WHERE ""CreationDate"" < @TimeLimit", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@TimeLimit", DbType.DateTime2, timeLimit)); + }).ConfigureAwait(false); + + var existingLock = await ctx.ExecuteScalarAsync( + @"SELECT ""Lock"" FROM ""SharedLocks"" WHERE ""ContentId"" = @ContentId", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@ContentId", DbType.Int32, contentId)); + }).ConfigureAwait(false); + + if (existingLock == null || existingLock == DBNull.Value) + throw new SharedLockNotFoundException("Content is not locked."); + + var currentLock = (string)existingLock; + if (currentLock != @lock) + return currentLock; + + await ctx.ExecuteNonQueryAsync( + @"UPDATE ""SharedLocks"" SET ""CreationDate"" = @Now +WHERE ""ContentId"" = @ContentId AND ""Lock"" = @Lock", cmd => + { + cmd.Parameters.AddRange(new[] + { + ctx.CreateParameter("@ContentId", DbType.Int32, contentId), + ctx.CreateParameter("@Lock", DbType.String, 1000, @lock), + ctx.CreateParameter("@Now", DbType.DateTime2, DateTime.UtcNow), + }); + }).ConfigureAwait(false); + + return @lock; + } + + public async System.Threading.Tasks.Task CleanupSharedLocksAsync(CancellationToken cancellationToken) + { + using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancellationToken); + var timeLimit = DateTime.UtcNow.AddTicks(-SharedLockTimeout.Ticks); + await ctx.ExecuteNonQueryAsync( + @"DELETE FROM ""SharedLocks"" WHERE ""CreationDate"" < @TimeLimit", cmd => + { + cmd.Parameters.Add(ctx.CreateParameter("@TimeLimit", DbType.DateTime2, timeLimit)); + }).ConfigureAwait(false); + } + } +} diff --git a/src/ContentRepository.PostgreSql/Data/PgSqlStatisticalDataProvider.cs 
// PgSqlStatisticalDataProvider.cs
// NOTE(review): reconstructed from a mangled diff dump in which all generic angle
// brackets were stripped (e.g. "IOptions dataOptions", "Task>", "new List()").
// The type arguments below are restored from in-file usage (result arrays typed
// DateTime?[], records.Add(IStatisticalDataRecord), aggregations.Add(Aggregation));
// verify against the original repository source.
using System;
using System.Collections.Generic;
using System.Data;
using System.Data.Common;
using System.Threading;
using System.Threading.Tasks;

using Microsoft.Extensions.Options;
using SenseNet.Configuration;
using SenseNet.Diagnostics;
using SenseNet.Tools;

// ReSharper disable once CheckNamespace
namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient
{
    /// <summary>
    /// PostgreSQL implementation of <see cref="IStatisticalDataProvider"/>.
    /// Persists raw statistical records in the "StatisticalData" table and
    /// time-resolution aggregations in the "StatisticalAggregations" table.
    /// </summary>
    public class PgSqlStatisticalDataProvider : IStatisticalDataProvider
    {
        private readonly IRetrier _retrier;
        private DataOptions DataOptions { get; }
        private ConnectionStringOptions ConnectionStrings { get; }

        public PgSqlStatisticalDataProvider(IOptions<DataOptions> dataOptions,
            IOptions<ConnectionStringOptions> connectionOptions, IRetrier retrier)
        {
            _retrier = retrier;
            // Null-tolerant: fall back to defaults when options are not configured.
            DataOptions = dataOptions?.Value ?? new DataOptions();
            ConnectionStrings = connectionOptions?.Value ?? new ConnectionStringOptions();
        }

        /* =============================================================================================== Write */

        private static readonly string WriteDataScript = @"-- PgSqlStatisticalDataProvider.WriteData
INSERT INTO ""StatisticalData""
    (""DataType"", ""WrittenTime"", ""CreationTime"", ""Duration"", ""RequestLength"", ""ResponseLength"",
     ""ResponseStatusCode"", ""Url"", ""TargetId"", ""ContentId"", ""EventName"", ""ErrorMessage"", ""GeneralData"")
VALUES (@DataType, @WrittenTime, @CreationTime, @Duration, @RequestLength, @ResponseLength,
        @ResponseStatusCode, @Url, @TargetId, @ContentId, @EventName, @ErrorMessage, @GeneralData)
";

        /// <summary>Inserts one statistical record. Missing WrittenTime is replaced with DateTime.UtcNow.</summary>
        public async System.Threading.Tasks.Task WriteDataAsync(IStatisticalDataRecord data, CancellationToken cancel)
        {
            using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancel);
            await ctx.ExecuteNonQueryAsync(WriteDataScript, cmd =>
            {
                cmd.Parameters.AddRange(new[]
                {
                    ctx.CreateParameter("@DataType", DbType.String, 50, data.DataType),
                    ctx.CreateParameter("@WrittenTime", DbType.DateTime2, data.WrittenTime == default ? DateTime.UtcNow : data.WrittenTime),
                    ctx.CreateParameter("@CreationTime", DbType.DateTime2, (object)data.CreationTime ?? DBNull.Value),
                    // Duration is stored as ticks (BIGINT) and rehydrated via TimeSpan.FromTicks on read.
                    ctx.CreateParameter("@Duration", DbType.Int64, (object)data.Duration?.Ticks ?? DBNull.Value),
                    ctx.CreateParameter("@RequestLength", DbType.Int64, (object)data.RequestLength ?? DBNull.Value),
                    ctx.CreateParameter("@ResponseLength", DbType.Int64, (object)data.ResponseLength ?? DBNull.Value),
                    ctx.CreateParameter("@ResponseStatusCode", DbType.Int32, (object)data.ResponseStatusCode ?? DBNull.Value),
                    ctx.CreateParameter("@Url", DbType.String, int.MaxValue, (object)data.Url ?? DBNull.Value),
                    ctx.CreateParameter("@TargetId", DbType.Int32, (object)data.TargetId ?? DBNull.Value),
                    ctx.CreateParameter("@ContentId", DbType.Int32, (object)data.ContentId ?? DBNull.Value),
                    ctx.CreateParameter("@EventName", DbType.String, 450, (object)data.EventName ?? DBNull.Value),
                    ctx.CreateParameter("@ErrorMessage", DbType.String, int.MaxValue, (object)data.ErrorMessage ?? DBNull.Value),
                    ctx.CreateParameter("@GeneralData", DbType.String, int.MaxValue, (object)data.GeneralData ?? DBNull.Value),
                });
            }).ConfigureAwait(false);
        }

        /* =============================================================================================== Load */

        private static readonly string LoadUsageListScript = @"-- PgSqlStatisticalDataProvider.LoadUsageList
SELECT ""Id"", ""DataType"", ""WrittenTime"", ""CreationTime"", ""Duration"",
       ""RequestLength"", ""ResponseLength"", ""ResponseStatusCode"", ""Url"",
       ""TargetId"", ""ContentId"", ""EventName"", ""ErrorMessage"", ""GeneralData""
FROM ""StatisticalData""
WHERE ""DataType"" = @DataType AND ""WrittenTime"" < @EndTime
";

        /// <summary>
        /// Loads the newest <paramref name="count"/> records of a data type written before
        /// <paramref name="endTimeExclusive"/>, optionally restricted to the given target ids.
        /// </summary>
        public async Task<IEnumerable<IStatisticalDataRecord>> LoadUsageListAsync(
            string dataType, int[] relatedTargetIds, DateTime endTimeExclusive,
            int count, CancellationToken cancel)
        {
            using var op = SnTrace.Database.StartOperation("PgSqlStatisticalDataProvider: " +
                "LoadUsageList(dataType: {0}, endTimeExclusive: {1:yyyy-MM-dd HH:mm:ss.fffff})",
                dataType, endTimeExclusive);

            var sql = LoadUsageListScript;
            // Safe to inline: relatedTargetIds are ints, so no SQL-injection surface.
            if (relatedTargetIds != null && relatedTargetIds.Length > 0)
                sql += " AND \"TargetId\" IN (" + string.Join(",", relatedTargetIds) + ")";
            sql += " ORDER BY \"WrittenTime\" DESC LIMIT @Count";

            using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancel);
            var records = new List<IStatisticalDataRecord>();
            await ctx.ExecuteReaderAsync(sql, cmd =>
            {
                cmd.Parameters.AddRange(new[]
                {
                    ctx.CreateParameter("@DataType", DbType.String, 50, dataType),
                    ctx.CreateParameter("@EndTime", DbType.DateTime2, endTimeExclusive),
                    ctx.CreateParameter("@Count", DbType.Int32, count),
                });
            }, async (reader, c) =>
            {
                while (await reader.ReadAsync(c).ConfigureAwait(false))
                    records.Add(GetStatisticalDataRecordFromReader(reader));
                return true;
            }).ConfigureAwait(false);
            op.Successful = true;

            return records;
        }

        /* =============================================================================================== Aggregation */

        private static readonly string LoadAggregatedUsageScript = @"-- PgSqlStatisticalDataProvider.LoadAggregatedUsage
SELECT * FROM ""StatisticalAggregations""
WHERE ""DataType"" = @DataType AND ""Resolution"" = @Resolution AND ""Date"" >= @StartTime AND ""Date"" < @EndTimeExclusive
ORDER BY ""Date""
";

        /// <summary>Loads aggregations of a data type and resolution in the [startTime, endTimeExclusive) window.</summary>
        public async Task<IEnumerable<Aggregation>> LoadAggregatedUsageAsync(
            string dataType, TimeResolution resolution, DateTime startTime, DateTime endTimeExclusive,
            CancellationToken cancel)
        {
            using var op = SnTrace.Database.StartOperation("PgSqlStatisticalDataProvider: " +
                "LoadAggregatedUsage(dataType: {0}, resolution: {1}, startTime: {2:yyyy-MM-dd HH:mm:ss.fffff}, endTimeExclusive: {3:yyyy-MM-dd HH:mm:ss.fffff})",
                dataType, resolution, startTime, endTimeExclusive);

            using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancel);
            var aggregations = new List<Aggregation>();
            await ctx.ExecuteReaderAsync(LoadAggregatedUsageScript, cmd =>
            {
                cmd.Parameters.AddRange(new[]
                {
                    ctx.CreateParameter("@DataType", DbType.String, dataType),
                    // Resolution is stored as its enum name (see GetAggregationFromReader).
                    ctx.CreateParameter("@Resolution", DbType.String, resolution.ToString()),
                    ctx.CreateParameter("@StartTime", DbType.DateTime2, startTime),
                    ctx.CreateParameter("@EndTimeExclusive", DbType.DateTime2, endTimeExclusive),
                });
            }, async (reader, c) =>
            {
                while (await reader.ReadAsync(c).ConfigureAwait(false))
                    aggregations.Add(GetAggregationFromReader(reader));
                return true;
            }).ConfigureAwait(false);
            op.Successful = true;

            return aggregations;
        }

        private static readonly string LoadFirstAggregationTimesByResolutionsScript = @"-- PgSqlStatisticalDataProvider.LoadFirstAggregationTimesByResolutions
SELECT
    (SELECT ""Date"" FROM ""StatisticalAggregations"" WHERE ""DataType"" = @DataType AND ""Resolution"" = 'Minute' ORDER BY ""Date"" LIMIT 1) AS ""Minute"",
    (SELECT ""Date"" FROM ""StatisticalAggregations"" WHERE ""DataType"" = @DataType AND ""Resolution"" = 'Hour' ORDER BY ""Date"" LIMIT 1) AS ""Hour"",
    (SELECT ""Date"" FROM ""StatisticalAggregations"" WHERE ""DataType"" = @DataType AND ""Resolution"" = 'Day' ORDER BY ""Date"" LIMIT 1) AS ""Day"",
    (SELECT ""Date"" FROM ""StatisticalAggregations"" WHERE ""DataType"" = @DataType AND ""Resolution"" = 'Month' ORDER BY ""Date"" LIMIT 1) AS ""Month""
";

        /// <summary>
        /// Returns the earliest aggregation date per resolution for the given data type.
        /// Result indexes: 0 = Minute, 1 = Hour, 2 = Day, 3 = Month; null when no row exists.
        /// </summary>
        public async Task<DateTime?[]> LoadFirstAggregationTimesByResolutionsAsync(string dataType,
            CancellationToken cancel)
        {
            using var op = SnTrace.Database.StartOperation("PgSqlStatisticalDataProvider: " +
                "LoadFirstAggregationTimesByResolutions(dataType: {0})", dataType);

            using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancel);
            var result = new DateTime?[4];
            await ctx.ExecuteReaderAsync(LoadFirstAggregationTimesByResolutionsScript, cmd =>
            {
                cmd.Parameters.AddRange(new[]
                {
                    ctx.CreateParameter("@DataType", DbType.String, dataType),
                });
            }, async (reader, c) =>
            {
                if (await reader.ReadAsync(c).ConfigureAwait(false))
                {
                    result[0] = reader.IsDBNull(reader.GetOrdinal("Minute")) ? null : reader.GetDateTime(reader.GetOrdinal("Minute"));
                    result[1] = reader.IsDBNull(reader.GetOrdinal("Hour")) ? null : reader.GetDateTime(reader.GetOrdinal("Hour"));
                    result[2] = reader.IsDBNull(reader.GetOrdinal("Day")) ? null : reader.GetDateTime(reader.GetOrdinal("Day"));
                    result[3] = reader.IsDBNull(reader.GetOrdinal("Month")) ? null : reader.GetDateTime(reader.GetOrdinal("Month"));
                }
                return true;
            }).ConfigureAwait(false);
            op.Successful = true;

            return result;
        }

        private static readonly string LoadLastAggregationTimesByResolutionsScript = @"-- PgSqlStatisticalDataProvider.LoadLastAggregationTimesByResolutions
SELECT
    (SELECT ""Date"" FROM ""StatisticalAggregations"" WHERE ""Resolution"" = 'Minute' ORDER BY ""Date"" DESC LIMIT 1) AS ""Minute"",
    (SELECT ""Date"" FROM ""StatisticalAggregations"" WHERE ""Resolution"" = 'Hour' ORDER BY ""Date"" DESC LIMIT 1) AS ""Hour"",
    (SELECT ""Date"" FROM ""StatisticalAggregations"" WHERE ""Resolution"" = 'Day' ORDER BY ""Date"" DESC LIMIT 1) AS ""Day"",
    (SELECT ""Date"" FROM ""StatisticalAggregations"" WHERE ""Resolution"" = 'Month' ORDER BY ""Date"" DESC LIMIT 1) AS ""Month""
";

        /// <summary>
        /// Returns the latest aggregation date per resolution across ALL data types
        /// (the SQL intentionally has no DataType filter).
        /// Result indexes: 0 = Minute, 1 = Hour, 2 = Day, 3 = Month; null when no row exists.
        /// </summary>
        public async Task<DateTime?[]> LoadLastAggregationTimesByResolutionsAsync(CancellationToken cancel)
        {
            using var op = SnTrace.Database.StartOperation("PgSqlStatisticalDataProvider: " +
                "LoadLastAggregationTimesByResolutions()");

            using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancel);
            var result = new DateTime?[4];
            await ctx.ExecuteReaderAsync(LoadLastAggregationTimesByResolutionsScript, async (reader, c) =>
            {
                if (await reader.ReadAsync(c).ConfigureAwait(false))
                {
                    result[0] = reader.IsDBNull(reader.GetOrdinal("Minute")) ? null : reader.GetDateTime(reader.GetOrdinal("Minute"));
                    result[1] = reader.IsDBNull(reader.GetOrdinal("Hour")) ? null : reader.GetDateTime(reader.GetOrdinal("Hour"));
                    result[2] = reader.IsDBNull(reader.GetOrdinal("Day")) ? null : reader.GetDateTime(reader.GetOrdinal("Day"));
                    result[3] = reader.IsDBNull(reader.GetOrdinal("Month")) ? null : reader.GetDateTime(reader.GetOrdinal("Month"));
                }
                return true;
            }).ConfigureAwait(false);
            op.Successful = true;

            return result;
        }

        /* =============================================================================================== EnumerateData */

        private static readonly string EnumerateDataScript = @"-- PgSqlStatisticalDataProvider.EnumerateData
SELECT * FROM ""StatisticalData""
WHERE ""DataType"" = @DataType AND ""CreationTime"" >= @StartTime AND ""CreationTime"" < @EndTimeExclusive
ORDER BY ""CreationTime""
";

        /// <summary>
        /// Streams records of a data type in the [startTime, endTimeExclusive) window, in
        /// CreationTime order, invoking <paramref name="aggregatorCallback"/> for each row.
        /// Cancellation is honored between rows.
        /// </summary>
        public async System.Threading.Tasks.Task EnumerateDataAsync(string dataType, DateTime startTime, DateTime endTimeExclusive,
            Action<IStatisticalDataRecord> aggregatorCallback, CancellationToken cancel)
        {
            using var op = SnTrace.Database.StartOperation("PgSqlStatisticalDataProvider: " +
                "EnumerateData(dataType: {0}, startTime: {1:yyyy-MM-dd HH:mm:ss.fffff}, endTimeExclusive: {2:yyyy-MM-dd HH:mm:ss.fffff})",
                dataType, startTime, endTimeExclusive);

            using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancel);
            await ctx.ExecuteReaderAsync(EnumerateDataScript, cmd =>
            {
                cmd.Parameters.AddRange(new[]
                {
                    ctx.CreateParameter("@DataType", DbType.String, dataType),
                    ctx.CreateParameter("@StartTime", DbType.DateTime2, startTime),
                    ctx.CreateParameter("@EndTimeExclusive", DbType.DateTime2, endTimeExclusive),
                });
            }, async (reader, c) =>
            {
                while (await reader.ReadAsync(c).ConfigureAwait(false))
                {
                    c.ThrowIfCancellationRequested();
                    var item = GetStatisticalDataRecordFromReader(reader);
                    aggregatorCallback(item);
                }
                return true;
            }).ConfigureAwait(false);
            op.Successful = true;
        }

        /* =============================================================================================== WriteAggregation */

        private static readonly string WriteAggregationScript = @"-- PgSqlStatisticalDataProvider.WriteAggregation
INSERT INTO ""StatisticalAggregations"" (""DataType"", ""Date"", ""Resolution"", ""Data"")
VALUES (@DataType, @Date, @Resolution, @Data)
ON CONFLICT (""DataType"", ""Date"", ""Resolution"") DO UPDATE SET ""Data"" = EXCLUDED.""Data""
";

        /// <summary>Upserts one aggregation row keyed by (DataType, Date, Resolution).</summary>
        public async System.Threading.Tasks.Task WriteAggregationAsync(Aggregation aggregation, CancellationToken cancel)
        {
            using var op = SnTrace.Database.StartOperation("PgSqlStatisticalDataProvider: " +
                "WriteAggregation: DataType: {0}, Resolution: {1}, Date: {2:yyyy-MM-dd HH:mm:ss.fffff})",
                aggregation.DataType, aggregation.Resolution, aggregation.Date);

            using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancel);
            await ctx.ExecuteNonQueryAsync(WriteAggregationScript, cmd =>
            {
                cmd.Parameters.AddRange(new[]
                {
                    ctx.CreateParameter("@DataType", DbType.String, aggregation.DataType),
                    ctx.CreateParameter("@Resolution", DbType.String, aggregation.Resolution.ToString()),
                    ctx.CreateParameter("@Date", DbType.DateTime2, aggregation.Date),
                    ctx.CreateParameter("@Data", DbType.String, (object)aggregation.Data ?? DBNull.Value),
                });
            }).ConfigureAwait(false);
            op.Successful = true;
        }

        /* =============================================================================================== Cleanup */

        private static readonly string CleanupRecordsScript = @"-- PgSqlStatisticalDataProvider.CleanupRecords
DELETE FROM ""StatisticalData"" WHERE ""DataType"" = @DataType AND ""CreationTime"" < @RetentionTime
";

        /// <summary>Deletes raw records of a data type created before the retention time.</summary>
        public async System.Threading.Tasks.Task CleanupRecordsAsync(string dataType, DateTime retentionTime, CancellationToken cancel)
        {
            using var op = SnTrace.Database.StartOperation("PgSqlStatisticalDataProvider: " +
                "CleanupRecords(dataType: {0}, retentionTime: {1:yyyy-MM-dd HH:mm:ss.fffff})",
                dataType, retentionTime);

            using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancel);
            await ctx.ExecuteNonQueryAsync(CleanupRecordsScript, cmd =>
            {
                cmd.Parameters.AddRange(new[]
                {
                    ctx.CreateParameter("@DataType", DbType.String, 50, dataType),
                    ctx.CreateParameter("@RetentionTime", DbType.DateTime2, retentionTime),
                });
            }).ConfigureAwait(false);
            op.Successful = true;
        }

        private static readonly string CleanupAggregationsScript = @"-- PgSqlStatisticalDataProvider.CleanupAggregations
DELETE FROM ""StatisticalAggregations"" WHERE ""DataType"" = @DataType AND ""Resolution"" = @Resolution AND ""Date"" < @RetentionTime
";

        /// <summary>Deletes aggregations of a data type and resolution dated before the retention time.</summary>
        public async System.Threading.Tasks.Task CleanupAggregationsAsync(string dataType, TimeResolution resolution,
            DateTime retentionTime, CancellationToken cancel)
        {
            using var op = SnTrace.Database.StartOperation("PgSqlStatisticalDataProvider: " +
                "CleanupAggregations(dataType: {0}, resolution: {1}, retentionTime: {2:yyyy-MM-dd HH:mm:ss.fffff})",
                dataType, resolution, retentionTime);

            using var ctx = new PgSqlDataContext(ConnectionStrings.Repository, DataOptions, _retrier, cancel);
            await ctx.ExecuteNonQueryAsync(CleanupAggregationsScript, cmd =>
            {
                cmd.Parameters.AddRange(new[]
                {
                    ctx.CreateParameter("@DataType", DbType.String, 50, dataType),
                    ctx.CreateParameter("@Resolution", DbType.String, resolution.ToString()),
                    ctx.CreateParameter("@RetentionTime", DbType.DateTime2, retentionTime),
                });
            }).ConfigureAwait(false);
            op.Successful = true;
        }

        /* =============================================================================================== Helpers */

        /// <summary>Materializes one "StatisticalData" row; DB NULLs map to null properties.</summary>
        private IStatisticalDataRecord GetStatisticalDataRecordFromReader(DbDataReader reader)
        {
            var durationIndex = reader.GetOrdinal("Duration");
            return new StatisticalDataRecord
            {
                Id = reader.GetInt32(reader.GetOrdinal("Id")),
                DataType = reader.GetString(reader.GetOrdinal("DataType")),
                WrittenTime = reader.IsDBNull(reader.GetOrdinal("WrittenTime"))
                    ? DateTime.MinValue : reader.GetDateTime(reader.GetOrdinal("WrittenTime")),
                CreationTime = reader.IsDBNull(reader.GetOrdinal("CreationTime"))
                    ? null : reader.GetDateTime(reader.GetOrdinal("CreationTime")),
                // Stored as ticks; see WriteDataAsync.
                Duration = reader.IsDBNull(durationIndex)
                    ? null : TimeSpan.FromTicks(reader.GetInt64(durationIndex)),
                RequestLength = reader.IsDBNull(reader.GetOrdinal("RequestLength"))
                    ? null : reader.GetInt64(reader.GetOrdinal("RequestLength")),
                ResponseLength = reader.IsDBNull(reader.GetOrdinal("ResponseLength"))
                    ? null : reader.GetInt64(reader.GetOrdinal("ResponseLength")),
                ResponseStatusCode = reader.IsDBNull(reader.GetOrdinal("ResponseStatusCode"))
                    ? null : reader.GetInt32(reader.GetOrdinal("ResponseStatusCode")),
                Url = reader.IsDBNull(reader.GetOrdinal("Url"))
                    ? null : reader.GetString(reader.GetOrdinal("Url")),
                TargetId = reader.IsDBNull(reader.GetOrdinal("TargetId"))
                    ? null : reader.GetInt32(reader.GetOrdinal("TargetId")),
                ContentId = reader.IsDBNull(reader.GetOrdinal("ContentId"))
                    ? null : reader.GetInt32(reader.GetOrdinal("ContentId")),
                EventName = reader.IsDBNull(reader.GetOrdinal("EventName"))
                    ? null : reader.GetString(reader.GetOrdinal("EventName")),
                ErrorMessage = reader.IsDBNull(reader.GetOrdinal("ErrorMessage"))
                    ? null : reader.GetString(reader.GetOrdinal("ErrorMessage")),
                GeneralData = reader.IsDBNull(reader.GetOrdinal("GeneralData"))
                    ? null : reader.GetString(reader.GetOrdinal("GeneralData")),
            };
        }

        /// <summary>Materializes one "StatisticalAggregations" row; Resolution is parsed from its enum name.</summary>
        private Aggregation GetAggregationFromReader(DbDataReader reader)
        {
            return new Aggregation
            {
                DataType = reader.GetString(reader.GetOrdinal("DataType")),
                Date = reader.GetDateTime(reader.GetOrdinal("Date")),
                Resolution = (TimeResolution)Enum.Parse(typeof(TimeResolution),
                    reader.GetString(reader.GetOrdinal("Resolution"))),
                Data = reader.IsDBNull(reader.GetOrdinal("Data"))
                    ? null : reader.GetString(reader.GetOrdinal("Data")),
            };
        }

        // =============================================================================================== Installation

        /// <summary>DDL for the two statistics tables. Idempotent (IF NOT EXISTS).</summary>
        public static readonly string CreationScript = @"-- PgSqlStatisticalDataProvider.CreateTables
CREATE TABLE IF NOT EXISTS ""StatisticalData"" (
    ""Id"" SERIAL PRIMARY KEY,
    ""DataType"" VARCHAR(50) NOT NULL,
    ""CreationTime"" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    ""WrittenTime"" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    ""Duration"" BIGINT NULL,
    ""RequestLength"" BIGINT NULL,
    ""ResponseLength"" BIGINT NULL,
    ""ResponseStatusCode"" INT NULL,
    ""Url"" VARCHAR(1000) NULL,
    ""TargetId"" INT NULL,
    ""ContentId"" INT NULL,
    ""EventName"" VARCHAR(50) NULL,
    ""ErrorMessage"" VARCHAR(500) NULL,
    ""GeneralData"" TEXT NULL
);

CREATE INDEX IF NOT EXISTS ""IX_StatisticalData_DataType_CreationTime""
    ON ""StatisticalData"" (""DataType"", ""CreationTime"");

CREATE TABLE IF NOT EXISTS ""StatisticalAggregations"" (
    ""DataType"" VARCHAR(50) NOT NULL,
    ""Date"" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    ""Resolution"" VARCHAR(10) NOT NULL,
    ""Data"" TEXT NULL,
    CONSTRAINT ""PK_StatisticalAggregations"" PRIMARY KEY (""DataType"", ""Date"", ""Resolution"")
);
";
    }
}
diff --git a/src/ContentRepository.PostgreSql/PgSqlExtensions.cs b/src/ContentRepository.PostgreSql/PgSqlExtensions.cs new file mode 100644 index 000000000..280ed8f79 --- /dev/null +++ b/src/ContentRepository.PostgreSql/PgSqlExtensions.cs @@ -0,0 +1,102 @@
// NOTE(review): this file was recovered from a dump that stripped all generic
// angle brackets. Calls such as ".AddSingleton()", ".AddComponent()",
// ".Configure(...)" and parameters such as "Action configure" originally carried
// type arguments (e.g. AddSingleton<TService, TImpl>, Action<ConnectionStringOptions>)
// that CANNOT be reconstructed from this excerpt — restore them from the original
// repository source before compiling. Only layout, doc-comment tags and comments
// have been repaired here; every code token is kept as found.
using System;
using System.Linq;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using SenseNet.Configuration;
using SenseNet.ContentRepository.Components;
using SenseNet.ContentRepository.Security.Clients;
using SenseNet.ContentRepository.Storage;
using SenseNet.ContentRepository.Storage.Data;
using SenseNet.ContentRepository.Storage.Data.MsSqlClient;
using SenseNet.ContentRepository.Storage.Data.PgSqlClient;
using SenseNet.ContentRepository.Storage.Data.PgSqlClient.Security;
using SenseNet.Security;
using SenseNet.Storage.Data.PgSqlClient;
using SenseNet.Tools;

// ReSharper disable once CheckNamespace
namespace SenseNet.Extensions.DependencyInjection
{
    /// <summary>DI registration helpers for the PostgreSQL provider family.</summary>
    public static class PgSqlExtensions
    {
        /// <summary>
        /// Adds PostgreSQL implementations of data related services to the service collection.
        /// </summary>
        public static IServiceCollection AddSenseNetPgSqlProviders(this IServiceCollection services,
            Action configureConnectionStrings = null, // NOTE(review): type arg stripped (likely Action<ConnectionStringOptions>)
            Action configureInstallation = null,      // NOTE(review): type arg stripped
            Action configureDataOptions = null)       // NOTE(review): type arg stripped (likely Action<DataOptions>)
        {
            // NOTE(review): each AddSingleton()/AddComponent() below lost its generic
            // service/implementation type arguments in the dump.
            return services.AddSenseNetPgSqlDataProvider()
                .AddSingleton()
                .AddSingleton()
                .AddSingleton()
                .AddSingleton()
                .AddSenseNetPgSqlStatisticalDataProvider()
                .AddDatabaseAuditEventWriter()
                .AddSenseNetPgSqlClientStoreDataProvider()
                .AddComponent()
                .AddComponent()
                .AddComponent()

                // Override blob storage providers with PostgreSQL implementations
                .AddSingleton()
                .AddSingleton()
                // Replace the MSSQL BuiltInBlobProvider with the PgSql one in the IBlobProvider
                // collection. BlobProviderStore resolves providers via IBlobProvider, and
                // IBuiltInBlobProvider is found by scanning that collection. Without this,
                // the MSSQL BuiltInBlobProvider (registered by AddSenseNetBlobStorage) would
                // be used for WriteChunk, leading to "Keyword not supported: 'host'" errors.
                .RemoveAll()
                .AddSenseNetBlobProvider()

                .Configure(options => { configureConnectionStrings?.Invoke(options); })
                .Configure(options => { configureInstallation?.Invoke(options); })
                .Configure(options => { configureDataOptions?.Invoke(options); })
                ;
        }

        /// <summary>
        /// Adds the default PostgreSQL data provider to the service collection.
        /// </summary>
        public static IServiceCollection AddSenseNetPgSqlDataProvider(this IServiceCollection services)
        {
            // NOTE(review): generic type arguments stripped from the three calls below.
            return services.AddSenseNetDataProvider()
                .AddSenseNetDataInstaller()
                .AddSingleton()
                .Configure(_ =>
                {
                    // this method is for making sure that the option object is registered
                });
        }

        /// <summary>
        /// Adds the PostgreSQL statistical data provider to the service collection.
        /// </summary>
        public static IServiceCollection AddSenseNetPgSqlStatisticalDataProvider(this IServiceCollection services)
        {
            // NOTE(review): originally AddStatisticalDataProvider<PgSqlStatisticalDataProvider>() — TODO confirm.
            return services.AddStatisticalDataProvider();
        }

        /// <summary>
        /// Adds the PostgreSQL ClientStore data provider to the service collection.
        /// </summary>
        public static IServiceCollection AddSenseNetPgSqlClientStoreDataProvider(this IServiceCollection services)
        {
            // NOTE(review): generic type argument stripped.
            return services.AddSenseNetClientStoreDataProvider();
        }

        /// <summary>
        /// Adds the PostgreSQL security data provider to the service collection.
        /// Replaces the SQL Server-only EFCSecurityDataProvider.
        /// </summary>
        public static IServiceCollection AddPgSqlSecurityDataProvider(this IServiceCollection services,
            Action configure = null) // NOTE(review): type arg stripped
        {
            if (configure != null)
                services.Configure(configure);

            // NOTE(review): originally AddSingleton<ISecurityDataProvider, ...PgSql...>() — restore type args.
            return services.AddSingleton();
        }
    }
}
diff --git a/src/ContentRepository.PostgreSql/Scripts/PgSqlInstall_Schema.sql b/src/ContentRepository.PostgreSql/Scripts/PgSqlInstall_Schema.sql new file mode 100644 index 000000000..1bc06e033 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Scripts/PgSqlInstall_Schema.sql @@ -0,0 +1,681 @@
------------------------------------------------------------
-- sensenet PostgreSQL Schema Installation Script
------------------------------------------------------------

-- Enable required extensions
-- NOTE(review): gen_random_uuid() used later is built in on PostgreSQL 13+;
-- on older servers it additionally requires: CREATE EXTENSION IF NOT EXISTS pgcrypto;
CREATE EXTENSION IF NOT EXISTS citext;

------------------------------------------------------------
-- DROP EXISTING TABLES (reverse dependency order)
-- WARNING: destructive — this script reinstalls the schema from scratch.
------------------------------------------------------------
DROP TABLE IF EXISTS "SharedLocks" CASCADE;
DROP TABLE IF EXISTS "AccessTokens" CASCADE;
DROP TABLE IF EXISTS "TreeLocks" CASCADE;
DROP TABLE IF EXISTS "Packages" CASCADE;
DROP TABLE IF EXISTS "SchemaModification" CASCADE;
DROP TABLE IF EXISTS "WorkflowNotification" CASCADE;
DROP TABLE IF EXISTS "IndexingActivities" CASCADE;
DROP TABLE IF EXISTS "LogEntries" CASCADE;
DROP TABLE IF EXISTS "JournalItems" CASCADE;
DROP TABLE IF EXISTS "ReferenceProperties" CASCADE;
DROP TABLE IF EXISTS "BinaryProperties" CASCADE;
DROP TABLE IF EXISTS "Files" CASCADE;
DROP TABLE IF EXISTS "LongTextProperties" CASCADE;
DROP TABLE IF EXISTS "Versions" CASCADE;
DROP TABLE IF EXISTS "Nodes" CASCADE;
DROP TABLE IF EXISTS "PropertyTypes" CASCADE;
DROP TABLE IF EXISTS "ContentListTypes" CASCADE;
DROP TABLE IF EXISTS "NodeTypes" CASCADE;

-- Drop existing views
DROP VIEW IF EXISTS "NodeInfoView";
DROP VIEW IF EXISTS "ReferencesInfoView";
DROP VIEW IF EXISTS "PermissionInfoView";
DROP VIEW IF EXISTS "MembershipInfoView";
------------------------------------------------------------
-- Timestamp trigger function (emulates MSSQL rowversion)
-- Increments the row's "Timestamp" column on every UPDATE so optimistic
-- concurrency checks ported from the MSSQL provider keep working.
------------------------------------------------------------
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
    NEW."Timestamp" := COALESCE(OLD."Timestamp", 0) + 1;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

------------------------------------------------------------
-- CREATE TABLES
-- NOTE(review): "CREATE INDEX ... INCLUDE (...)" requires PostgreSQL 11+;
-- gen_random_uuid() defaults require PostgreSQL 13+ (or the pgcrypto extension).
------------------------------------------------------------

-- NodeTypes
CREATE TABLE IF NOT EXISTS "NodeTypes" (
    "NodeTypeId" SERIAL PRIMARY KEY,
    "ParentId" INT NULL,
    "Name" VARCHAR(450) NOT NULL,
    "ClassName" VARCHAR(450) NULL,
    "Properties" TEXT NOT NULL
);

CREATE INDEX IF NOT EXISTS "ix_nodetypes_parentid" ON "NodeTypes" ("ParentId");
CREATE INDEX IF NOT EXISTS "ix_nodetypes_name" ON "NodeTypes" ("Name") INCLUDE ("NodeTypeId");

-- ContentListTypes
CREATE TABLE IF NOT EXISTS "ContentListTypes" (
    "ContentListTypeId" SERIAL PRIMARY KEY,
    "Name" VARCHAR(450) NOT NULL,
    "Properties" TEXT NOT NULL
);

-- PropertyTypes
CREATE TABLE IF NOT EXISTS "PropertyTypes" (
    "PropertyTypeId" SERIAL PRIMARY KEY,
    "Name" VARCHAR(450) NOT NULL,
    "DataType" VARCHAR(10) NOT NULL,
    "Mapping" INT NOT NULL,
    "IsContentListProperty" SMALLINT NOT NULL DEFAULT 0
);

CREATE INDEX IF NOT EXISTS "ix_propertytypes_name" ON "PropertyTypes" ("Name") INCLUDE ("PropertyTypeId");

-- Nodes
-- "Path" is CITEXT so path lookups are case-insensitive (mirrors MSSQL collation behavior).
CREATE TABLE IF NOT EXISTS "Nodes" (
    "NodeId" SERIAL PRIMARY KEY,
    "NodeTypeId" INT NOT NULL,
    "ContentListTypeId" INT NULL,
    "ContentListId" INT NULL,
    "CreatingInProgress" SMALLINT NOT NULL DEFAULT 0,
    "IsDeleted" SMALLINT NOT NULL,
    "IsInherited" SMALLINT NOT NULL DEFAULT 1,
    "ParentNodeId" INT NULL,
    "Name" VARCHAR(450) NOT NULL,
    "Path" CITEXT NOT NULL,
    "Index" INT NOT NULL,
    "Locked" SMALLINT NOT NULL,
    "LockedById" INT NULL,
    "ETag" VARCHAR(50) NOT NULL,
    "LockType" INT NOT NULL,
    "LockTimeout" INT NOT NULL,
    "LockDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "LockToken" VARCHAR(50) NOT NULL,
    "LastLockUpdate" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "LastMinorVersionId" INT NULL,
    "LastMajorVersionId" INT NULL,
    "CreationDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "CreatedById" INT NOT NULL,
    "ModificationDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "ModifiedById" INT NOT NULL,
    "DisplayName" VARCHAR(450) NULL,
    "IsSystem" SMALLINT NULL,
    "OwnerId" INT NOT NULL,
    "SavingState" INT NULL,
    "RowGuid" UUID NOT NULL DEFAULT gen_random_uuid(),
    "Timestamp" BIGINT NOT NULL DEFAULT 0
);

CREATE UNIQUE INDEX IF NOT EXISTS "IX_Nodes_Path" ON "Nodes" ("Path") INCLUDE ("NodeId");
CREATE INDEX IF NOT EXISTS "IX_Nodes_ParentNodeId" ON "Nodes" ("ParentNodeId");
CREATE INDEX IF NOT EXISTS "IX_Nodes_NodeTypeId" ON "Nodes" ("NodeTypeId");

CREATE TRIGGER trg_update_timestamp_nodes
    BEFORE UPDATE ON "Nodes"
    FOR EACH ROW
    EXECUTE FUNCTION update_timestamp();

-- Versions
CREATE TABLE IF NOT EXISTS "Versions" (
    "VersionId" SERIAL PRIMARY KEY,
    "NodeId" INT NOT NULL,
    "MajorNumber" SMALLINT NOT NULL,
    "MinorNumber" SMALLINT NOT NULL,
    "CreationDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "CreatedById" INT NOT NULL,
    "ModificationDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "ModifiedById" INT NOT NULL,
    "Status" SMALLINT NOT NULL DEFAULT 1,
    "IndexDocument" TEXT NULL,
    "ChangedData" TEXT NULL,
    "DynamicProperties" TEXT NULL,
    "ContentListProperties" TEXT NULL,
    "RowGuid" UUID NOT NULL DEFAULT gen_random_uuid(),
    "Timestamp" BIGINT NOT NULL DEFAULT 0
);

CREATE INDEX IF NOT EXISTS "ix_Versions_NodeId" ON "Versions" ("NodeId");
-- NOTE(review): index NAME mentions MajorNumber but the column list omits it
-- ("NodeId", "MinorNumber", "Status") — confirm against the MSSQL original
-- whether "MajorNumber" should be included.
CREATE INDEX IF NOT EXISTS "ix_Versions_NodeId_MinorNumber_MajorNumber_Status"
    ON "Versions" ("NodeId", "MinorNumber", "Status");

CREATE TRIGGER trg_update_timestamp_versions
    BEFORE UPDATE ON "Versions"
    FOR EACH ROW
    EXECUTE FUNCTION update_timestamp();

-- Files
-- "Stream" holds the binary when the built-in blob provider is used;
-- external providers fill "BlobProvider"/"BlobProviderData" instead.
CREATE TABLE IF NOT EXISTS "Files" (
    "FileId" SERIAL PRIMARY KEY,
    "ContentType" VARCHAR(450) NOT NULL,
    "FileNameWithoutExtension" VARCHAR(450) NULL,
    "Extension" VARCHAR(50) NOT NULL,
    "Size" BIGINT NOT NULL,
    "Checksum" VARCHAR(200) NULL,
    "Stream" BYTEA NULL,
    "CreationDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT (NOW() AT TIME ZONE 'UTC'),
    "RowGuid" UUID NOT NULL UNIQUE DEFAULT gen_random_uuid(),
    "Timestamp" BIGINT NOT NULL DEFAULT 0,
    "Staging" BOOLEAN NULL,
    "StagingVersionId" INT NULL,
    "StagingPropertyTypeId" INT NULL,
    "IsDeleted" BOOLEAN NULL,
    "BlobProvider" VARCHAR(450) NULL,
    "BlobProviderData" TEXT NULL
);

CREATE TRIGGER trg_update_timestamp_files
    BEFORE UPDATE ON "Files"
    FOR EACH ROW
    EXECUTE FUNCTION update_timestamp();

-- BinaryProperties
CREATE TABLE IF NOT EXISTS "BinaryProperties" (
    "BinaryPropertyId" SERIAL PRIMARY KEY,
    "VersionId" INT NULL,
    "PropertyTypeId" INT NULL,
    "FileId" INT NOT NULL
);

CREATE INDEX IF NOT EXISTS "ix_binaryproperties_version_id" ON "BinaryProperties" ("VersionId");
CREATE INDEX IF NOT EXISTS "ix_binaryproperties_file_id" ON "BinaryProperties" ("FileId");

-- ReferenceProperties
CREATE TABLE IF NOT EXISTS "ReferenceProperties" (
    "ReferencePropertyId" SERIAL PRIMARY KEY,
    "VersionId" INT NOT NULL,
    "PropertyTypeId" INT NOT NULL,
    "ReferredNodeId" INT NOT NULL
);

CREATE INDEX IF NOT EXISTS "IX_ReferenceProperties_VersionIdPropertyTypeId"
    ON "ReferenceProperties" ("VersionId", "PropertyTypeId");
CREATE INDEX IF NOT EXISTS "IX_ReferenceProperties_ReferredNodeId"
    ON "ReferenceProperties" ("ReferredNodeId");
-- NOTE(review): ("VersionId") alone is a prefix of the composite index above — possibly redundant.
CREATE INDEX IF NOT EXISTS "ix_referenceproperties_version_id" ON "ReferenceProperties" ("VersionId");

-- LongTextProperties
CREATE TABLE IF NOT EXISTS "LongTextProperties" (
    "LongTextPropertyId" SERIAL PRIMARY KEY,
    "VersionId" INT NOT NULL,
    "PropertyTypeId" INT NOT NULL,
    "Length" INT NULL,
    "Value" TEXT NULL
);

CREATE INDEX IF NOT EXISTS "ix_longtextproperties_version_id" ON "LongTextProperties" ("VersionId");

------------------------------------------------------------
-- CREATE VIEWS (diagnostic helpers)
------------------------------------------------------------

-- Human-readable node/version listing; "Version" renders e.g. "1.0.A".
-- NOTE(review): the 'TRUE'/'false' string literals have inconsistent casing in the original.
CREATE OR REPLACE VIEW "NodeInfoView" AS
SELECT N."NodeId", T."Name" AS "Type", N."Name", N."Path"::TEXT AS "Path", N."LockedById",
       V."VersionId",
       CAST(V."MajorNumber" AS VARCHAR) || '.' || CAST(V."MinorNumber" AS VARCHAR) || '.' ||
       CASE V."Status"
           WHEN 1 THEN 'A' WHEN 2 THEN 'L' WHEN 4 THEN 'D'
           WHEN 8 THEN 'R' WHEN 16 THEN 'P' ELSE '' END AS "Version",
       CASE V."VersionId" WHEN N."LastMajorVersionId" THEN 'TRUE' ELSE 'false' END AS "LastPub",
       CASE V."VersionId" WHEN N."LastMinorVersionId" THEN 'TRUE' ELSE 'false' END AS "LastWork"
FROM "Versions" AS V
    INNER JOIN "Nodes" AS N ON V."NodeId" = N."NodeId"
    INNER JOIN "NodeTypes" AS T ON N."NodeTypeId" = T."NodeTypeId";

-- Flattens all reference-like relations (dynamic references plus the built-in
-- Parent / LockedById / CreatedById / ModifiedById links) into one result set.
CREATE OR REPLACE VIEW "ReferencesInfoView" AS
SELECT Nodes."Name" AS "SrcName",
       'V' || CAST(Versions."MajorNumber" AS VARCHAR) || '.' || CAST(Versions."MinorNumber" AS VARCHAR) AS "SrcVer",
       Slots."Name" AS "RelType", RefNodes."Name" AS "TargetName",
       Nodes."NodeId" AS "SrcId", RefNodes."NodeId" AS "TargetId",
       Nodes."Path"::TEXT AS "SrcPath", RefNodes."Path"::TEXT AS "TargetPath"
FROM "ReferenceProperties" AS Refs
    INNER JOIN "Versions" AS Versions ON Refs."VersionId" = Versions."VersionId"
    INNER JOIN "Nodes" AS Nodes ON Versions."NodeId" = Nodes."NodeId"
    INNER JOIN "Nodes" AS RefNodes ON Refs."ReferredNodeId" = RefNodes."NodeId"
    INNER JOIN "PropertyTypes" AS Slots ON Refs."PropertyTypeId" = Slots."PropertyTypeId"
UNION ALL
SELECT Nodes."Name", 'V*.*', 'Parent', RefNodes."Name",
       Nodes."NodeId", RefNodes."NodeId", Nodes."Path"::TEXT, RefNodes."Path"::TEXT
FROM "Nodes" AS Nodes
    INNER JOIN "Nodes" AS RefNodes ON Nodes."ParentNodeId" = RefNodes."NodeId"
UNION ALL
SELECT Nodes."Name", 'V*.*', 'LockedById', RefNodes."Name",
       Nodes."NodeId", RefNodes."NodeId", Nodes."Path"::TEXT, RefNodes."Path"::TEXT
FROM "Nodes" AS Nodes
    INNER JOIN "Nodes" AS RefNodes ON Nodes."LockedById" = RefNodes."NodeId"
UNION ALL
SELECT Nodes."Name",
       'V' || CAST(Versions."MajorNumber" AS VARCHAR) || '.' || CAST(Versions."MinorNumber" AS VARCHAR),
       'CreatedById', RefNodes."Name",
       Nodes."NodeId", RefNodes."NodeId", Nodes."Path"::TEXT, RefNodes."Path"::TEXT
FROM "Nodes" AS Nodes
    INNER JOIN "Versions" AS Versions ON Nodes."NodeId" = Versions."NodeId"
    INNER JOIN "Nodes" AS RefNodes ON Versions."CreatedById" = RefNodes."NodeId"
UNION ALL
SELECT Nodes."Name",
       'V' || CAST(Versions."MajorNumber" AS VARCHAR) || '.' || CAST(Versions."MinorNumber" AS VARCHAR),
       'ModifiedById', RefNodes."Name",
       Nodes."NodeId", RefNodes."NodeId", Nodes."Path"::TEXT, RefNodes."Path"::TEXT
FROM "Nodes" AS Nodes
    INNER JOIN "Versions" AS Versions ON Nodes."NodeId" = Versions."NodeId"
    INNER JOIN "Nodes" AS RefNodes ON Versions."ModifiedById" = RefNodes."NodeId";

------------------------------------------------------------
-- ADDITIONAL TABLES
------------------------------------------------------------

-- JournalItems
CREATE TABLE IF NOT EXISTS "JournalItems" (
    "Id" SERIAL PRIMARY KEY,
    "When" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "Wherewith" VARCHAR(450) NOT NULL,
    "What" VARCHAR(100) NOT NULL,
    "Who" VARCHAR(200) NOT NULL,
    "RowGuid" UUID NOT NULL DEFAULT gen_random_uuid(),
    "Timestamp" BIGINT NOT NULL DEFAULT 0,
    "NodeId" INT NOT NULL,
    "DisplayName" VARCHAR(450) NOT NULL,
    "NodeTypeName" VARCHAR(100) NOT NULL,
    "SourcePath" VARCHAR(450) NULL,
    "TargetPath" VARCHAR(450) NULL,
    "TargetDisplayName" VARCHAR(450) NULL,
    "Hidden" BOOLEAN NOT NULL,
    "Details" VARCHAR(450) NULL
);

CREATE INDEX IF NOT EXISTS "IX_JournalItems" ON "JournalItems" ("When" DESC, "Wherewith");

CREATE TRIGGER trg_update_timestamp_journalitems
    BEFORE UPDATE ON "JournalItems"
    FOR EACH ROW
    EXECUTE FUNCTION update_timestamp();

-- LogEntries
CREATE TABLE IF NOT EXISTS "LogEntries" (
    "LogId" SERIAL PRIMARY KEY,
    "EventId" INT NOT NULL,
    "Category" VARCHAR(50) NULL,
    "Priority" INT NOT NULL,
    "Severity" VARCHAR(30) NOT NULL,
    "Title" VARCHAR(256) NULL,
    "ContentId" INT NULL,
    "ContentPath" VARCHAR(450) NULL,
    "UserName" VARCHAR(450) NULL,
    "LogDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "MachineName" VARCHAR(32) NULL,
    "AppDomainName" VARCHAR(512) NULL,
    "ProcessID" VARCHAR(256) NULL,
    "ProcessName" VARCHAR(512) NULL,
    "ThreadName" VARCHAR(512) NULL,
    "Win32ThreadId" VARCHAR(128) NULL,
    "Message" VARCHAR(1500) NULL,
    "FormattedMessage" TEXT NULL,
    "RowGuid" UUID NOT NULL DEFAULT gen_random_uuid(),
    "Timestamp" BIGINT NOT NULL DEFAULT 0
);

CREATE TRIGGER trg_update_timestamp_logentries
    BEFORE UPDATE ON "LogEntries"
    FOR EACH ROW
    EXECUTE FUNCTION update_timestamp();

-- IndexingActivities
CREATE TABLE IF NOT EXISTS "IndexingActivities" (
    "IndexingActivityId" SERIAL PRIMARY KEY,
    "ActivityType" VARCHAR(50) NOT NULL,
    "CreationDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "RunningState" VARCHAR(10) NOT NULL,
    "LockTime" TIMESTAMP WITHOUT TIME ZONE NULL,
    "NodeId" INT NOT NULL,
    "VersionId" INT NOT NULL,
    "Path" VARCHAR(450) NOT NULL,
    "VersionTimestamp" BIGINT NULL,
    "Extension" TEXT NULL
);

-- WorkflowNotification
CREATE TABLE IF NOT EXISTS "WorkflowNotification" (
    "NotificationId" SERIAL PRIMARY KEY,
    "NodeId" INT NOT NULL,
    "WorkflowInstanceId" UUID NOT NULL,
    "WorkflowNodePath" VARCHAR(450) NOT NULL,
    "BookmarkName" VARCHAR(50) NOT NULL
);

-- SchemaModification
CREATE TABLE IF NOT EXISTS "SchemaModification" (
    "SchemaModificationId" SERIAL PRIMARY KEY,
    "ModificationDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "LockToken" VARCHAR(50) NULL,
    "Timestamp" BIGINT NOT NULL DEFAULT 0
);

CREATE TRIGGER trg_update_timestamp_schemamodification
    BEFORE UPDATE ON "SchemaModification"
    FOR EACH ROW
    EXECUTE FUNCTION update_timestamp();

-- Packages
CREATE TABLE IF NOT EXISTS "Packages" (
    "Id" SERIAL PRIMARY KEY,
    "PackageType" VARCHAR(50) NOT NULL,
    "ComponentId" VARCHAR(450) NULL,
    "ComponentVersion" VARCHAR(50) NULL,
    "ReleaseDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "ExecutionDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL,
    "ExecutionResult" VARCHAR(50) NOT NULL,
    "ExecutionError" TEXT NULL,
    "Description" VARCHAR(1000) NULL,
    "Manifest" TEXT NULL
);

-- TreeLocks
CREATE TABLE IF NOT EXISTS "TreeLocks" (
    "TreeLockId" SERIAL PRIMARY KEY,
    "Path" VARCHAR(450) NOT NULL,
    "LockedAt" TIMESTAMP WITHOUT TIME ZONE NOT NULL
);

-- AccessTokens
-- NOTE(review): source chunk is truncated here mid-statement; the "AccessTokens"
-- (and per the DROP list also "SharedLocks") definitions continue beyond this excerpt.
CREATE TABLE IF NOT EXISTS
"AccessTokens" ( + "AccessTokenId" SERIAL PRIMARY KEY, + "Value" VARCHAR(1000) NOT NULL, + "UserId" INT NOT NULL, + "ContentId" INT NULL, + "Feature" VARCHAR(1000) NULL, + "CreationDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL, + "ExpirationDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL +); + +-- SharedLocks +CREATE TABLE IF NOT EXISTS "SharedLocks" ( + "SharedLockId" SERIAL PRIMARY KEY, + "ContentId" INT NOT NULL, + "Lock" VARCHAR(1000) NOT NULL, + "CreationDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL +); + +------------------------------------------------------------ +-- STORED FUNCTIONS +------------------------------------------------------------ + +-- Function for RestoreIndexingActivityStatus +CREATE OR REPLACE FUNCTION sn_restore_indexing_activity_status(p_last_activity_id INT, p_gaps TEXT) +RETURNS TEXT AS $$ +DECLARE + _state_string TEXT; + _last_in_db INT; + _gaps_in_db TEXT; +BEGIN + -- Lock the table to prevent concurrent modifications + LOCK TABLE "IndexingActivities" IN EXCLUSIVE MODE; + + _state_string := p_last_activity_id::TEXT || '(' || COALESCE(p_gaps, '') || ')'; + + -- Check if already restored with same state + IF EXISTS (SELECT 1 FROM "IndexingActivities" + WHERE "ActivityType" = 'Restore' AND "Extension" = _state_string) THEN + RETURN 'AlreadyRestored'; + END IF; + + -- Get current state from DB + SELECT "IndexingActivityId" INTO _last_in_db + FROM "IndexingActivities" + WHERE "RunningState" = 'Done' + ORDER BY "CreationDate" DESC + LIMIT 1; + + IF _last_in_db IS NULL THEN + _last_in_db := 0; + END IF; + + SELECT string_agg("IndexingActivityId"::TEXT, ',' ORDER BY "IndexingActivityId") + INTO _gaps_in_db + FROM "IndexingActivities" + WHERE "RunningState" != 'Done' AND "IndexingActivityId" < _last_in_db; + + -- Check if restoration is necessary + IF p_last_activity_id = _last_in_db AND COALESCE(p_gaps, '') = COALESCE(_gaps_in_db, '') THEN + RETURN 'NotNecessary'; + END IF; + + -- Perform the restoration + -- Reset activities after the restore point, 
and gap activities, to 'Waiting' + UPDATE "IndexingActivities" SET "RunningState" = 'Waiting' + WHERE "IndexingActivityId" > p_last_activity_id + OR "IndexingActivityId" = ANY(string_to_array(p_gaps, ',')::int[]); + + -- Record the restore operation + INSERT INTO "IndexingActivities" + ("ActivityType", "CreationDate", "RunningState", "LockTime", + "NodeId", "VersionId", "Path", "VersionTimestamp", "Extension") + VALUES + ('Restore', NOW() AT TIME ZONE 'UTC', 'Done', NOW() AT TIME ZONE 'UTC', + 0, 0, '', 0, _state_string); + + RETURN 'Restored'; +END; +$$ LANGUAGE plpgsql; + +------------------------------------------------------------ +-- Function: sn_insert_node_and_version +-- Atomically inserts a Node, a Version, and updates the +-- Node's LastMinorVersionId and LastMajorVersionId. +-- Returns the new IDs and timestamps. +------------------------------------------------------------ +CREATE OR REPLACE FUNCTION sn_insert_node_and_version( + p_node_type_id INT, + p_content_list_type_id INT, + p_content_list_id INT, + p_creating_in_progress SMALLINT, + p_is_deleted SMALLINT, + p_is_inherited SMALLINT, + p_parent_node_id INT, + p_name VARCHAR(450), + p_display_name VARCHAR(450), + p_path VARCHAR(450), + p_index INT, + p_locked SMALLINT, + p_locked_by_id INT, + p_etag VARCHAR(50), + p_lock_type INT, + p_lock_timeout INT, + p_lock_date TIMESTAMP WITHOUT TIME ZONE, + p_lock_token VARCHAR(50), + p_last_lock_update TIMESTAMP WITHOUT TIME ZONE, + p_creation_date TIMESTAMP WITHOUT TIME ZONE, + p_created_by_id INT, + p_modification_date TIMESTAMP WITHOUT TIME ZONE, + p_modified_by_id INT, + p_is_system SMALLINT, + p_owner_id INT, + p_saving_state INT, + p_major_number SMALLINT, + p_minor_number SMALLINT, + p_status SMALLINT, + p_changed_data TEXT, + p_version_creation_date TIMESTAMP WITHOUT TIME ZONE, + p_version_created_by_id INT, + p_version_modification_date TIMESTAMP WITHOUT TIME ZONE, + p_version_modified_by_id INT, + p_dynamic_properties TEXT, + 
p_content_list_properties TEXT +) +RETURNS TABLE( + "NodeId" INT, + "NodeTimestamp" BIGINT, + "VersionId" INT, + "VersionTimestamp" BIGINT, + "LastMajorVersionId" INT, + "LastMinorVersionId" INT, + "Path" TEXT +) AS $$ +DECLARE + v_node_id INT; + v_version_id INT; + v_node_ts BIGINT; + v_version_ts BIGINT; +BEGIN + INSERT INTO "Nodes" + ("NodeTypeId", "ContentListTypeId", "ContentListId", "CreatingInProgress", "IsDeleted", "IsInherited", + "ParentNodeId", "Name", "DisplayName", "Path", "Index", "Locked", "LockedById", + "ETag", "LockType", "LockTimeout", "LockDate", "LockToken", "LastLockUpdate", + "CreationDate", "CreatedById", "ModificationDate", "ModifiedById", + "IsSystem", "OwnerId", "SavingState") + VALUES + (p_node_type_id, p_content_list_type_id, p_content_list_id, p_creating_in_progress, p_is_deleted, p_is_inherited, + p_parent_node_id, p_name, p_display_name, p_path, p_index, p_locked, p_locked_by_id, + p_etag, p_lock_type, p_lock_timeout, p_lock_date, p_lock_token, p_last_lock_update, + p_creation_date, p_created_by_id, p_modification_date, p_modified_by_id, + p_is_system, p_owner_id, p_saving_state) + RETURNING "Nodes"."NodeId", "Nodes"."Timestamp" INTO v_node_id, v_node_ts; + + INSERT INTO "Versions" + ("NodeId", "MajorNumber", "MinorNumber", "CreationDate", "CreatedById", + "ModificationDate", "ModifiedById", "Status", "ChangedData", + "DynamicProperties", "ContentListProperties") + VALUES + (v_node_id, p_major_number, p_minor_number, p_version_creation_date, p_version_created_by_id, + p_version_modification_date, p_version_modified_by_id, p_status, p_changed_data, + p_dynamic_properties, p_content_list_properties) + RETURNING "Versions"."VersionId", "Versions"."Timestamp" INTO v_version_id, v_version_ts; + + IF p_status = 1 THEN + UPDATE "Nodes" SET "LastMinorVersionId" = v_version_id, "LastMajorVersionId" = v_version_id + WHERE "Nodes"."NodeId" = v_node_id; + ELSE + UPDATE "Nodes" SET "LastMinorVersionId" = v_version_id + WHERE "Nodes"."NodeId" = 
v_node_id; + END IF; + + SELECT n."Timestamp" INTO v_node_ts FROM "Nodes" n WHERE n."NodeId" = v_node_id; + + RETURN QUERY + SELECT v_node_id, v_node_ts, v_version_id, v_version_ts, + n."LastMajorVersionId", n."LastMinorVersionId", n."Path"::TEXT + FROM "Nodes" n + WHERE n."NodeId" = v_node_id; +END; +$$ LANGUAGE plpgsql; + +------------------------------------------------------------ +-- FOREIGN KEY CONSTRAINTS +------------------------------------------------------------ +ALTER TABLE "BinaryProperties" ADD CONSTRAINT "FK_BinaryProperties_PropertyTypes" + FOREIGN KEY ("PropertyTypeId") REFERENCES "PropertyTypes" ("PropertyTypeId"); +ALTER TABLE "BinaryProperties" ADD CONSTRAINT "FK_BinaryProperties_Versions" + FOREIGN KEY ("VersionId") REFERENCES "Versions" ("VersionId"); +ALTER TABLE "BinaryProperties" ADD CONSTRAINT "FK_BinaryProperties_Files" + FOREIGN KEY ("FileId") REFERENCES "Files" ("FileId"); + +ALTER TABLE "Nodes" ADD CONSTRAINT "FK_Nodes_LockedBy" + FOREIGN KEY ("LockedById") REFERENCES "Nodes" ("NodeId"); +ALTER TABLE "Nodes" ADD CONSTRAINT "FK_Nodes_Parent" + FOREIGN KEY ("ParentNodeId") REFERENCES "Nodes" ("NodeId"); +ALTER TABLE "Nodes" ADD CONSTRAINT "FK_Nodes_NodeTypes" + FOREIGN KEY ("NodeTypeId") REFERENCES "NodeTypes" ("NodeTypeId"); +ALTER TABLE "Nodes" ADD CONSTRAINT "FK_Nodes_Nodes_CreatedById" + FOREIGN KEY ("CreatedById") REFERENCES "Nodes" ("NodeId"); +ALTER TABLE "Nodes" ADD CONSTRAINT "FK_Nodes_Nodes_ModifiedById" + FOREIGN KEY ("ModifiedById") REFERENCES "Nodes" ("NodeId"); +ALTER TABLE "Nodes" ADD CONSTRAINT "FK_Nodes_Nodes_ContentListId" + FOREIGN KEY ("ContentListId") REFERENCES "Nodes" ("NodeId"); + +ALTER TABLE "ReferenceProperties" ADD CONSTRAINT "FK_ReferenceProperties_PropertyTypes" + FOREIGN KEY ("PropertyTypeId") REFERENCES "PropertyTypes" ("PropertyTypeId"); + +ALTER TABLE "NodeTypes" ADD CONSTRAINT "FK_NodeTypes_NodeTypes" + FOREIGN KEY ("ParentId") REFERENCES "NodeTypes" ("NodeTypeId"); + +ALTER TABLE "LongTextProperties" 
ADD CONSTRAINT "FK_LongTextProperties_PropertyTypes" + FOREIGN KEY ("PropertyTypeId") REFERENCES "PropertyTypes" ("PropertyTypeId"); +ALTER TABLE "LongTextProperties" ADD CONSTRAINT "FK_LongTextProperties_Versions" + FOREIGN KEY ("VersionId") REFERENCES "Versions" ("VersionId"); + +ALTER TABLE "Versions" ADD CONSTRAINT "FK_Versions_Nodes" + FOREIGN KEY ("NodeId") REFERENCES "Nodes" ("NodeId"); +ALTER TABLE "Versions" ADD CONSTRAINT "FK_Versions_Nodes_CreatedBy" + FOREIGN KEY ("CreatedById") REFERENCES "Nodes" ("NodeId"); +ALTER TABLE "Versions" ADD CONSTRAINT "FK_Versions_Nodes_ModifiedBy" + FOREIGN KEY ("ModifiedById") REFERENCES "Nodes" ("NodeId"); + +------------------------------------------------------------ +-- Disable foreign keys for initial data loading +------------------------------------------------------------ +ALTER TABLE "BinaryProperties" DISABLE TRIGGER ALL; +ALTER TABLE "Nodes" DISABLE TRIGGER ALL; +ALTER TABLE "ReferenceProperties" DISABLE TRIGGER ALL; +ALTER TABLE "LongTextProperties" DISABLE TRIGGER ALL; +ALTER TABLE "Versions" DISABLE TRIGGER ALL; + +------------------------------------------------------------ +-- StatisticalData tables +------------------------------------------------------------ +CREATE TABLE IF NOT EXISTS "StatisticalData" ( + "Id" SERIAL PRIMARY KEY, + "DataType" VARCHAR(50) NOT NULL, + "CreationTime" TIMESTAMP WITHOUT TIME ZONE NOT NULL, + "WrittenTime" TIMESTAMP WITHOUT TIME ZONE NOT NULL, + "Duration" BIGINT NULL, + "RequestLength" BIGINT NULL, + "ResponseLength" BIGINT NULL, + "ResponseStatusCode" INT NULL, + "Url" VARCHAR(1000) NULL, + "TargetId" INT NULL, + "ContentId" INT NULL, + "EventName" VARCHAR(50) NULL, + "ErrorMessage" VARCHAR(500) NULL, + "GeneralData" TEXT NULL +); + +CREATE INDEX IF NOT EXISTS "IX_StatisticalData_DataType_CreationTime" + ON "StatisticalData" ("DataType", "CreationTime"); + +CREATE TABLE IF NOT EXISTS "StatisticalAggregations" ( + "DataType" VARCHAR(50) NOT NULL, + "Date" TIMESTAMP WITHOUT 
TIME ZONE NOT NULL, + "Resolution" VARCHAR(10) NOT NULL, + "Data" TEXT NULL, + CONSTRAINT "PK_StatisticalAggregations" PRIMARY KEY ("DataType", "Date", "Resolution") +); + +------------------------------------------------------------ +-- ExclusiveLock table +------------------------------------------------------------ +CREATE TABLE IF NOT EXISTS "ExclusiveLocks" ( + "Id" SERIAL PRIMARY KEY, + "Name" VARCHAR(450) NOT NULL UNIQUE, + "OperationId" VARCHAR(450) NOT NULL, + "TimeLimit" TIMESTAMP WITHOUT TIME ZONE NOT NULL +); + +------------------------------------------------------------ +-- ClientStore tables +------------------------------------------------------------ +CREATE TABLE IF NOT EXISTS "ClientApps" ( + "ClientId" VARCHAR(50) NOT NULL PRIMARY KEY, + "Name" VARCHAR(450) NULL, + "Repository" VARCHAR(450) NULL, + "UserName" VARCHAR(450) NULL, + "Authority" VARCHAR(450) NULL, + "Type" INT NULL +); + +CREATE INDEX IF NOT EXISTS "IX_ClientApps_Repository" ON "ClientApps" ("Repository"); +CREATE INDEX IF NOT EXISTS "IX_ClientApps_Authority" ON "ClientApps" ("Authority"); + +CREATE TABLE IF NOT EXISTS "ClientSecrets" ( + "Id" VARCHAR(50) NOT NULL PRIMARY KEY, + "ClientId" VARCHAR(50) NOT NULL REFERENCES "ClientApps" ("ClientId"), + "Value" VARCHAR(450) NOT NULL, + "CreationDate" TIMESTAMP WITHOUT TIME ZONE NOT NULL, + "ValidTill" TIMESTAMP WITHOUT TIME ZONE NOT NULL +); + +CREATE INDEX IF NOT EXISTS "IX_ClientSecrets_ClientId" ON "ClientSecrets" ("ClientId"); diff --git a/src/ContentRepository.PostgreSql/Scripts/PgSqlInstall_Security.sql b/src/ContentRepository.PostgreSql/Scripts/PgSqlInstall_Security.sql new file mode 100644 index 000000000..fa40c4416 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Scripts/PgSqlInstall_Security.sql @@ -0,0 +1,57 @@ +------------------------------------------------------------ +-- sensenet PostgreSQL Security (EF) Installation Script +------------------------------------------------------------ + +DROP TABLE IF EXISTS 
"EFMessages" CASCADE; +DROP TABLE IF EXISTS "EFMemberships" CASCADE; +DROP TABLE IF EXISTS "EFEntries" CASCADE; +DROP TABLE IF EXISTS "EFEntities" CASCADE; + +-- EFEntities +CREATE TABLE IF NOT EXISTS "EFEntities" ( + "Id" INT NOT NULL, + "OwnerId" INT NULL, + "ParentId" INT NULL, + "IsInherited" BOOLEAN NOT NULL, + CONSTRAINT "PK_EFEntities" PRIMARY KEY ("Id") +); + +CREATE INDEX IF NOT EXISTS "IX_EFEntities_ParentId" ON "EFEntities" ("ParentId"); + +-- EFEntries +CREATE TABLE IF NOT EXISTS "EFEntries" ( + "EFEntityId" INT NOT NULL, + "EntryType" INT NOT NULL, + "IdentityId" INT NOT NULL, + "LocalOnly" BOOLEAN NOT NULL, + "AllowBits" BIGINT NOT NULL, + "DenyBits" BIGINT NOT NULL, + CONSTRAINT "PK_EFEntries" PRIMARY KEY ("EFEntityId", "EntryType", "IdentityId", "LocalOnly") +); + +CREATE INDEX IF NOT EXISTS "IX_EFEntries_EFEntityId" ON "EFEntries" ("EFEntityId"); + +-- EFMemberships +CREATE TABLE IF NOT EXISTS "EFMemberships" ( + "GroupId" INT NOT NULL, + "MemberId" INT NOT NULL, + "IsUser" BOOLEAN NOT NULL, + CONSTRAINT "PK_EFMemberships" PRIMARY KEY ("GroupId", "MemberId") +); + +-- EFMessages +CREATE TABLE IF NOT EXISTS "EFMessages" ( + "Id" SERIAL PRIMARY KEY, + "SavedBy" TEXT NULL, + "SavedAt" TIMESTAMP WITHOUT TIME ZONE NOT NULL, + "ExecutionState" TEXT NULL, + "LockedBy" TEXT NULL, + "LockedAt" TIMESTAMP WITHOUT TIME ZONE NULL, + "Body" BYTEA NULL +); + +-- Foreign Keys +ALTER TABLE "EFEntities" ADD CONSTRAINT "FK_EFEntities_EFEntities_ParentId" + FOREIGN KEY ("ParentId") REFERENCES "EFEntities" ("Id"); +ALTER TABLE "EFEntries" ADD CONSTRAINT "FK_EFEntries_EFEntities_EFEntityId" + FOREIGN KEY ("EFEntityId") REFERENCES "EFEntities" ("Id"); diff --git a/src/ContentRepository.PostgreSql/Security/PgSqlSecurityDataOptions.cs b/src/ContentRepository.PostgreSql/Security/PgSqlSecurityDataOptions.cs new file mode 100644 index 000000000..bedd81b5a --- /dev/null +++ b/src/ContentRepository.PostgreSql/Security/PgSqlSecurityDataOptions.cs @@ -0,0 +1,18 @@ +namespace 
SenseNet.ContentRepository.Storage.Data.PgSqlClient.Security +{ + /// + /// Configuration options for the PostgreSQL security data provider. + /// + public class PgSqlSecurityDataOptions + { + /// + /// SQL command timeout in seconds. Default: 120. + /// + public int SqlCommandTimeout { get; set; } = 120; + + /// + /// PostgreSQL connection string for the security database. + /// + public string ConnectionString { get; set; } + } +} diff --git a/src/ContentRepository.PostgreSql/Security/PgSqlSecurityDataProvider.cs b/src/ContentRepository.PostgreSql/Security/PgSqlSecurityDataProvider.cs new file mode 100644 index 000000000..2eb6c8031 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Security/PgSqlSecurityDataProvider.cs @@ -0,0 +1,1251 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.EntityFrameworkCore; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using SenseNet.Diagnostics; +using SenseNet.Security; +using SenseNet.Security.Messaging; +using SenseNet.Security.Messaging.SecurityMessages; +using SenseNet.Tools; + +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient.Security +{ + /// + /// PostgreSQL implementation of . + /// Replaces the SQL Server-only EFCSecurityDataProvider. + /// + public class PgSqlSecurityDataProvider : ISecurityDataProvider + { + private readonly PgSqlSecurityDataOptions _options; + private readonly ILogger _logger; + private readonly IMessageSenderManager _messageSenderManager; + private readonly IRetrier _retrier; + + public PgSqlSecurityDataProvider( + IMessageSenderManager messageSenderManager, + IRetrier retrier, + IOptions options, + ILogger logger) + { + _messageSenderManager = messageSenderManager; + _retrier = retrier; + _options = options?.Value ?? 
new PgSqlSecurityDataOptions(); + _logger = logger; + + if (string.IsNullOrEmpty(_options.ConnectionString)) + _logger.LogError("No connection string was configured for the PostgreSQL security database."); + } + + internal PgSqlSecurityStorage Db() + { + return new PgSqlSecurityStorage(ConnectionString, CommandTimeout); + } + + private int CommandTimeout => _options.SqlCommandTimeout; + + public string ConnectionString + { + get => _options.ConnectionString; + set => _options.ConnectionString = value; + } + + public IActivitySerializer ActivitySerializer { get; set; } + + // ===================================================================== Database Setup + + public void InstallDatabase() + { + // Tables are created by PgSqlDataInstaller via the schema SQL script. + // No additional action needed here. + } + + public async Task IsDatabaseReadyAsync(CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: IsDatabaseReady()"); + + const string schemaCheckSql = @" +SELECT CASE WHEN EXISTS ( + SELECT 1 FROM information_schema.tables + WHERE table_name = 'EFEntries' AND table_schema = 'public' +) THEN TRUE ELSE FALSE END"; + + var result = false; + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var conn = db.Database.GetDbConnection(); + try + { + await conn.OpenAsync(cancel).ConfigureAwait(false); + var cmd = conn.CreateCommand(); + cmd.CommandType = System.Data.CommandType.Text; + cmd.CommandText = schemaCheckSql; + var dbResult = await cmd.ExecuteScalarAsync(cancel).ConfigureAwait(false); + result = Convert.ToBoolean(dbResult); + } + catch (Exception ex) + { + _logger.LogTrace("Error when accessing the database: {Message}", ex.Message); + } + finally + { + await conn.CloseAsync().ConfigureAwait(false); + } + } + op.Successful = true; + return result; + } + + // ===================================================================== Security Entities + + [Obsolete("Use async version 
instead.")] + public IEnumerable LoadSecurityEntities() + { + return LoadSecurityEntitiesAsync(CancellationToken.None).GetAwaiter().GetResult(); + } + + public async Task> LoadSecurityEntitiesAsync(CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: LoadSecurityEntities()"); + + IEnumerable result; + var db = Db(); + await using (db.ConfigureAwait(false)) + { + result = await db.EFEntities.AsNoTracking().Select(x => new StoredSecurityEntity + { + Id = x.Id, + nullableOwnerId = x.OwnerId, + nullableParentId = x.ParentId, + IsInherited = x.IsInherited + }).ToArrayAsync(cancel).ConfigureAwait(false); + } + op.Successful = true; + return result; + } + + public IEnumerable LoadAffectedEntityIdsByEntriesAndBreaks() + { + return LoadAffectedEntityIdsByEntriesAndBreaksAsync(CancellationToken.None) + .GetAwaiter().GetResult(); + } + + public async Task> LoadAffectedEntityIdsByEntriesAndBreaksAsync(CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: LoadAffectedEntityIdsByEntriesAndBreaks()"); + + IEnumerable result; + var db = Db(); + await using (db.ConfigureAwait(false)) + { + // Get entity IDs that have explicit entries or broken inheritance + var entryEntityIds = await db.EFEntries + .Select(e => e.EFEntityId).Distinct() + .ToArrayAsync(cancel).ConfigureAwait(false); + + var brokenInheritanceIds = await db.EFEntities + .Where(e => !e.IsInherited) + .Select(e => e.Id) + .ToArrayAsync(cancel).ConfigureAwait(false); + + result = entryEntityIds.Union(brokenInheritanceIds).Distinct().ToArray(); + } + op.Successful = true; + return result; + } + + public IEnumerable LoadAllAces() + { + using var db = Db(); + foreach (var entry in db.EFEntries.AsNoTracking()) + { + yield return new StoredAce + { + EntityId = entry.EFEntityId, + EntryType = (EntryType)entry.EntryType, + IdentityId = entry.IdentityId, + LocalOnly = entry.LocalOnly, + AllowBits = 
(ulong)entry.AllowBits, + DenyBits = (ulong)entry.DenyBits + }; + } + } + + [Obsolete("Use async version instead.")] + public StoredSecurityEntity LoadStoredSecurityEntity(int entityId) + { + return LoadStoredSecurityEntityAsync(entityId, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async Task LoadStoredSecurityEntityAsync(int entityId, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: LoadStoredSecurityEntityAsync(entityId: {0})", entityId); + + var result = await RetryAsync(async () => + { + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var entity = await db.EFEntities + .AsNoTracking() + .Where(e => e.Id == entityId) + .Select(e => new + { + e.Id, + e.OwnerId, + e.ParentId, + e.IsInherited, + HasExplicitEntry = db.EFEntries.Any(en => en.EFEntityId == e.Id) + }) + .FirstOrDefaultAsync(cancel).ConfigureAwait(false); + + if (entity == null) return null; + + return new StoredSecurityEntity + { + Id = entity.Id, + nullableOwnerId = entity.OwnerId, + nullableParentId = entity.ParentId, + IsInherited = entity.IsInherited, + HasExplicitEntry = entity.HasExplicitEntry + }; + } + }, cancel).ConfigureAwait(false); + + op.Successful = true; + return result; + } + + [Obsolete("Use async version instead.")] + public void InsertSecurityEntity(StoredSecurityEntity entity) + { + InsertSecurityEntityAsync(entity, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task InsertSecurityEntityAsync(StoredSecurityEntity entity, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: InsertSecurityEntity. 
Id: {0}, ParentId: {1}", + entity.Id, entity.ParentId); + + await RetryAsync(async () => + { + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var existing = await db.EFEntities + .FirstOrDefaultAsync(x => x.Id == entity.Id, cancel).ConfigureAwait(false); + if (existing != null) return; + + db.EFEntities.Add(new PgSqlEFEntity + { + Id = entity.Id, + OwnerId = entity.nullableOwnerId, + ParentId = entity.nullableParentId, + IsInherited = entity.IsInherited + }); + + try + { + await db.SaveChangesAsync(cancel).ConfigureAwait(false); + } + catch (DbUpdateException) + { + // entity already exists, that's ok + } + } + }, cancel).ConfigureAwait(false); + op.Successful = true; + } + + [Obsolete("Use async version instead.")] + public void UpdateSecurityEntity(StoredSecurityEntity entity) + { + UpdateSecurityEntityAsync(entity, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task UpdateSecurityEntityAsync(StoredSecurityEntity entity, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: UpdateSecurityEntity. 
Id: {0}, ParentId: {1}", + entity.Id, entity.ParentId); + + var exceptions = new List(); + for (var retry = 3; retry > 0; retry--) + { + try + { + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var oldEntity = await db.EFEntities + .FirstOrDefaultAsync(x => x.Id == entity.Id, cancel).ConfigureAwait(false); + if (oldEntity == null) + throw new EntityNotFoundException("Cannot update entity because it does not exist: " + entity.Id); + + oldEntity.OwnerId = entity.nullableOwnerId; + oldEntity.ParentId = entity.nullableParentId; + oldEntity.IsInherited = entity.IsInherited; + + await db.SaveChangesAsync(cancel).ConfigureAwait(false); + return; + } + } + catch (DbUpdateConcurrencyException ex) + { + exceptions.Add(ex); + if (retry > 1) await System.Threading.Tasks.Task.Delay(10, cancel).ConfigureAwait(false); + } + catch (Exception ex) + { + exceptions.Add(ex); + break; + } + } + + if (exceptions.Count > 0) + throw new SecurityStructureException( + "Cannot update entity because of concurrency: " + entity.Id, + new AggregateException(exceptions)); + + op.Successful = true; + } + + [Obsolete("Use async version instead.")] + public void DeleteSecurityEntity(int entityId) + { + DeleteSecurityEntityAsync(entityId, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task DeleteSecurityEntityAsync(int entityId, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: DeleteSecurityEntity(entityId: {0})", entityId); + + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var oldEntity = await db.EFEntities + .FirstOrDefaultAsync(x => x.Id == entityId, cancel).ConfigureAwait(false); + if (oldEntity == null) return; + + db.EFEntities.Remove(oldEntity); + try + { + await db.SaveChangesAsync(cancel).ConfigureAwait(false); + } + catch (DbUpdateConcurrencyException) + { + // already deleted + } + catch (Exception ex) + { + throw new 
SecurityStructureException( + "Cannot delete entity because of a database error: " + entityId, ex); + } + } + op.Successful = true; + } + + [Obsolete("Use async version instead.")] + public void MoveSecurityEntity(int sourceId, int targetId) + { + MoveSecurityEntityAsync(sourceId, targetId, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task MoveSecurityEntityAsync(int sourceId, int targetId, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: MoveSecurityEntity(sourceId: {0}, targetId: {1})", sourceId, targetId); + + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var source = await db.EFEntities + .FirstOrDefaultAsync(x => x.Id == sourceId, cancel).ConfigureAwait(false); + if (source == null) + throw new EntityNotFoundException("Cannot execute the move operation because source does not exist: " + sourceId); + + var target = await db.EFEntities + .FirstOrDefaultAsync(x => x.Id == targetId, cancel).ConfigureAwait(false); + if (target == null) + throw new EntityNotFoundException("Cannot execute the move operation because target does not exist: " + targetId); + + source.ParentId = target.Id; + await db.SaveChangesAsync(cancel).ConfigureAwait(false); + } + op.Successful = true; + } + + // ===================================================================== Permission Entries + + [Obsolete("Use async version instead.")] + public IEnumerable LoadAllPermissionEntries() + { + return LoadAllPermissionEntriesAsync(CancellationToken.None).GetAwaiter().GetResult(); + } + + public async Task> LoadAllPermissionEntriesAsync(CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: LoadAllPermissionEntries()"); + + IEnumerable result; + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var dbResult = await db.EFEntries.ToArrayAsync(cancel).ConfigureAwait(false); + result = 
dbResult.Select(a => new StoredAce + { + EntityId = a.EFEntityId, + EntryType = (EntryType)a.EntryType, + IdentityId = a.IdentityId, + LocalOnly = a.LocalOnly, + AllowBits = (ulong)a.AllowBits, + DenyBits = (ulong)a.DenyBits + }).ToArray(); + } + op.Successful = true; + return result; + } + + [Obsolete("Use async version instead.")] + public IEnumerable LoadPermissionEntries(IEnumerable entityIds) + { + return LoadPermissionEntriesAsync(entityIds, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async Task> LoadPermissionEntriesAsync( + IEnumerable entityIds, CancellationToken cancel) + { + var entityIdArray = entityIds as int[] ?? entityIds.ToArray(); + + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: LoadPermissionEntries(entityIds: {0})", + string.Join(", ", entityIdArray.Select(x => x.ToString()))); + + var result = await RetryAsync(async () => + { + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var dbResult = await db.EFEntries + .Where(x => entityIdArray.Contains(x.EFEntityId)) + .ToArrayAsync(cancel).ConfigureAwait(false); + + return dbResult.Select(a => new StoredAce + { + EntityId = a.EFEntityId, + EntryType = (EntryType)a.EntryType, + IdentityId = a.IdentityId, + LocalOnly = a.LocalOnly, + AllowBits = (ulong)a.AllowBits, + DenyBits = (ulong)a.DenyBits + }).ToArray(); + } + }, cancel).ConfigureAwait(false); + op.Successful = true; + return result; + } + + [Obsolete("Use async version instead.")] + public int GetEstimatedEntityCount() + { + return GetEstimatedEntityCountAsync(CancellationToken.None).GetAwaiter().GetResult(); + } + + public async Task GetEstimatedEntityCountAsync(CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: GetEstimatedEntityCount()"); + + int result; + var db = Db(); + await using (db.ConfigureAwait(false)) + result = await db.EFEntities.CountAsync(cancel).ConfigureAwait(false); + op.Successful = 
true; + return result; + } + + [Obsolete("Use async version instead.")] + public void WritePermissionEntries(IEnumerable aces) + { + WritePermissionEntriesAsync(aces, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task WritePermissionEntriesAsync(IEnumerable aces, CancellationToken cancel) + { + var storedAces = aces as StoredAce[] ?? aces.ToArray(); + + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: WritePermissionEntries. Count: {0}", storedAces.Length); + + try + { + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var sb = new StringBuilder(); + sb.AppendLine("BEGIN;"); + + foreach (var ace in storedAces) + sb.AppendFormat( + "DELETE FROM \"EFEntries\" WHERE \"EFEntityId\" = {0} AND \"EntryType\" = {1} AND \"IdentityId\" = {2} AND \"LocalOnly\" = {3};\n", + ace.EntityId, (int)ace.EntryType, ace.IdentityId, ace.LocalOnly ? "true" : "false"); + + foreach (var ace in storedAces) + sb.AppendFormat( + "INSERT INTO \"EFEntries\" (\"EFEntityId\", \"EntryType\", \"IdentityId\", \"LocalOnly\", \"AllowBits\", \"DenyBits\") VALUES ({0}, {1}, {2}, {3}, {4}, {5});\n", + ace.EntityId, (int)ace.EntryType, ace.IdentityId, ace.LocalOnly ? "true" : "false", + (long)ace.AllowBits, (long)ace.DenyBits); + + sb.AppendLine("COMMIT;"); + + await db.Database.ExecuteSqlRawAsync(sb.ToString(), cancel).ConfigureAwait(false); + } + } + catch (Npgsql.PostgresException ex) + { + var message = ex.Message.Contains("foreign key constraint") + ? "Cannot write permission entries because one of the entities is missing from the database. 
" + + string.Join(",", storedAces.Select(a => a.EntityId).Distinct().OrderBy(ei => ei)) + : "Cannot write permission entries because of a database error."; + + throw new SecurityStructureException(message, ex); + } + + op.Successful = true; + } + + [Obsolete("Use async version instead.")] + public void RemovePermissionEntries(IEnumerable aces) + { + RemovePermissionEntriesAsync(aces, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task RemovePermissionEntriesAsync(IEnumerable aces, CancellationToken cancel) + { + var storedAces = aces as StoredAce[] ?? aces.ToArray(); + if (storedAces.Length == 0) + return; + + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: RemovePermissionEntries. Count: {0}", storedAces.Length); + + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var sb = new StringBuilder(); + if (storedAces.Length > 1) + sb.AppendLine("BEGIN;"); + + foreach (var ace in storedAces) + sb.AppendFormat( + "DELETE FROM \"EFEntries\" WHERE \"EFEntityId\" = {0} AND \"EntryType\" = {1} AND \"IdentityId\" = {2} AND \"LocalOnly\" = {3};\n", + ace.EntityId, (int)ace.EntryType, ace.IdentityId, ace.LocalOnly ? 
"true" : "false"); + + if (storedAces.Length > 1) + sb.AppendLine("COMMIT;"); + + await db.Database.ExecuteSqlRawAsync(sb.ToString(), cancel).ConfigureAwait(false); + } + op.Successful = true; + } + + [Obsolete("Use async version instead.")] + public void RemovePermissionEntriesByEntity(int entityId) + { + RemovePermissionEntriesByEntityAsync(entityId, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task RemovePermissionEntriesByEntityAsync(int entityId, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: RemovePermissionEntriesByEntity(entityId: {0})", entityId); + + var db = Db(); + await using (db.ConfigureAwait(false)) + await db.Database.ExecuteSqlRawAsync( + "DELETE FROM \"EFEntries\" WHERE \"EFEntityId\" = {0}", new object[] { entityId }, cancel) + .ConfigureAwait(false); + op.Successful = true; + } + + [Obsolete("Use async version instead.")] + public void DeleteEntitiesAndEntries(int entityId) + { + DeleteEntitiesAndEntriesAsync(entityId, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task DeleteEntitiesAndEntriesAsync(int entityId, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: DeleteEntitiesAndEntries(entityId: {0})", entityId); + + const string script = @" +WITH RECURSIVE ""EntityCTE"" AS ( + SELECT ""Id"", ""ParentId"" FROM ""EFEntities"" WHERE ""Id"" = {0} + UNION ALL + SELECT E.""Id"", E.""ParentId"" FROM ""EFEntities"" E + INNER JOIN ""EntityCTE"" ON E.""ParentId"" = ""EntityCTE"".""Id"" +) +DELETE FROM ""EFEntries"" WHERE ""EFEntityId"" IN (SELECT ""Id"" FROM ""EntityCTE""); + +WITH RECURSIVE ""EntityCTE"" AS ( + SELECT ""Id"", ""ParentId"" FROM ""EFEntities"" WHERE ""Id"" = {0} + UNION ALL + SELECT E.""Id"", E.""ParentId"" FROM ""EFEntities"" E + INNER JOIN ""EntityCTE"" ON E.""ParentId"" = ""EntityCTE"".""Id"" +) +DELETE 
FROM ""EFEntities"" WHERE ""Id"" IN (SELECT ""Id"" FROM ""EntityCTE""); +"; + var db = Db(); + await using (db.ConfigureAwait(false)) + await db.Database.ExecuteSqlRawAsync(script, new object[] { entityId }, cancel) + .ConfigureAwait(false); + op.Successful = true; + } + + // ===================================================================== Groups & Membership + + [Obsolete("Use async version instead.")] + public IEnumerable LoadAllGroups() + { + return LoadAllGroupsAsync(CancellationToken.None).GetAwaiter().GetResult(); + } + + public async Task> LoadAllGroupsAsync(CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: LoadAllGroupsAsync()"); + + var groups = new Dictionary(); + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var memberships = await db.EFMemberships.AsNoTracking() + .ToArrayAsync(cancel).ConfigureAwait(false); + + foreach (var membership in memberships) + { + var group = EnsureGroup(membership.GroupId, groups); + if (membership.IsUser) + { + group.UserMemberIds.Add(membership.MemberId); + } + else + { + var memberGroup = EnsureGroup(membership.MemberId, groups); + group.Groups.Add(memberGroup); + memberGroup.ParentGroups.Add(group); + } + } + } + op.Successful = true; + return groups.Values; + } + + [Obsolete("Use async version instead.")] + public SecurityGroup LoadSecurityGroup(int groupId) + { + return LoadSecurityGroupAsync(groupId, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async Task LoadSecurityGroupAsync(int groupId, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: LoadSecurityGroup(groupId: {0})", groupId); + + var group = new SecurityGroup(groupId); + var groups = new Dictionary { { group.Id, group } }; + var rows = 0; + var db = Db(); + await using (db.ConfigureAwait(false)) + { + foreach (var membership in await db.EFMemberships + .Where(x => x.GroupId == groupId) + 
.ToArrayAsync(cancel).ConfigureAwait(false)) + { + rows++; + if (membership.IsUser) + { + group.UserMemberIds.Add(membership.MemberId); + } + else + { + var memberGroup = EnsureGroup(membership.MemberId, groups); + group.Groups.Add(memberGroup); + memberGroup.ParentGroups.Add(group); + } + } + } + op.Successful = true; + return rows == 0 ? null : group; + } + + public void QueryGroupRelatedEntities(int groupId, out IEnumerable entityIds, out IEnumerable exclusiveEntityIds) + { + var result = QueryGroupRelatedEntitiesAsync(groupId, CancellationToken.None).GetAwaiter().GetResult(); + entityIds = result.EntityIds; + exclusiveEntityIds = result.ExclusiveEntityIds; + } + + public async Task QueryGroupRelatedEntitiesAsync(int groupId, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: QueryGroupRelatedEntities(groupId: {0})", groupId); + + var exclusiveEntityIds = new List(); + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var entityIds = await db.EFEntries + .Where(x => x.IdentityId == groupId) + .Select(x => x.EFEntityId).Distinct() + .ToArrayAsync(cancel).ConfigureAwait(false); + + foreach (var relatedEntityId in entityIds) + { + var aces = await db.EFEntries + .Where(x => x.EFEntityId == relatedEntityId) + .ToArrayAsync(cancel).ConfigureAwait(false); + var groupRelatedCount = aces.Count(x => x.IdentityId == groupId); + if (aces.Length == groupRelatedCount) + exclusiveEntityIds.Add(relatedEntityId); + } + + op.Successful = true; + return new GroupRelatedEntitiesQueryResult + { + EntityIds = entityIds, + ExclusiveEntityIds = exclusiveEntityIds + }; + } + } + + [Obsolete("Use async version instead.")] + public void DeleteIdentityAndRelatedEntries(int identityId) + { + DeleteIdentityAndRelatedEntriesAsync(identityId, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task DeleteIdentityAndRelatedEntriesAsync(int identityId, CancellationToken 
cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: DeleteIdentityAndRelatedEntries(identityId: {0})", identityId); + + const string script = @" +DELETE FROM ""EFMemberships"" WHERE ""GroupId"" = {0} OR ""MemberId"" = {0}; +DELETE FROM ""EFEntries"" WHERE ""IdentityId"" = {0};"; + + var db = Db(); + await using (db.ConfigureAwait(false)) + await db.Database.ExecuteSqlRawAsync(script, new object[] { identityId }, cancel) + .ConfigureAwait(false); + op.Successful = true; + } + + [Obsolete("Use async version instead.")] + public void DeleteIdentitiesAndRelatedEntries(IEnumerable ids) + { + DeleteIdentitiesAndRelatedEntriesAsync(ids, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task DeleteIdentitiesAndRelatedEntriesAsync(IEnumerable ids, CancellationToken cancel) + { + var idArray = ids as int[] ?? ids.ToArray(); + + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: DeleteIdentitiesAndRelatedEntries(ids: {0})", + string.Join(", ", idArray.Select(x => x.ToString()))); + + if (idArray.Length == 0) return; + + var idList = string.Join(", ", idArray); + var script = $@" +BEGIN; +DELETE FROM ""EFEntries"" WHERE ""IdentityId"" IN ({idList}); +DELETE FROM ""EFMemberships"" WHERE ""GroupId"" IN ({idList}) OR ""MemberId"" IN ({idList}); +COMMIT;"; + + var db = Db(); + await using (db.ConfigureAwait(false)) + await db.Database.ExecuteSqlRawAsync(script, cancel).ConfigureAwait(false); + op.Successful = true; + } + + [Obsolete("Use async version instead.")] + public void AddMembers(int groupId, IEnumerable userMembers, IEnumerable groupMembers) + { + AddMembersAsync(groupId, userMembers, groupMembers, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task AddMembersAsync(int groupId, IEnumerable userMembers, + IEnumerable groupMembers, CancellationToken cancel) + { + groupMembers ??= Array.Empty(); + 
userMembers ??= Array.Empty(); + + var groupArray = groupMembers as int[] ?? groupMembers.ToArray(); + var userArray = userMembers as int[] ?? userMembers.ToArray(); + + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: AddMembers(groupId: {0}, userMembers: [{1}], groupMembers: [{2}])", + groupId, + string.Join(", ", userArray.Select(x => x.ToString())), + string.Join(", ", groupArray.Select(x => x.ToString()))); + + var allNewMembers = groupArray.Union(userArray); + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var origMemberIds = await db.EFMemberships + .Where(m => m.GroupId == groupId && allNewMembers.Contains(m.MemberId)) + .Select(m => m.MemberId) + .ToArrayAsync(cancel).ConfigureAwait(false); + + var newGroupIds = groupArray.Except(origMemberIds).ToArray(); + var newUserIds = userArray.Except(origMemberIds).ToArray(); + + db.EFMemberships.AddRange( + newGroupIds.Select(g => new PgSqlEFMembership { GroupId = groupId, MemberId = g, IsUser = false })); + db.EFMemberships.AddRange( + newUserIds.Select(g => new PgSqlEFMembership { GroupId = groupId, MemberId = g, IsUser = true })); + + await db.SaveChangesAsync(cancel).ConfigureAwait(false); + } + op.Successful = true; + } + + [Obsolete("Use async version instead.")] + public void RemoveMembers(int groupId, IEnumerable userMembers, IEnumerable groupMembers) + { + RemoveMembersAsync(groupId, userMembers, groupMembers, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task RemoveMembersAsync(int groupId, IEnumerable userMembers, + IEnumerable groupMembers, CancellationToken cancel) + { + groupMembers ??= Array.Empty(); + userMembers ??= Array.Empty(); + + var groupArray = groupMembers as int[] ?? groupMembers.ToArray(); + var userArray = userMembers as int[] ?? 
userMembers.ToArray(); + + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: RemoveMembers(groupId: {0}, userMembers: [{1}], groupMembers: [{2}])", + groupId, + string.Join(", ", userArray.Select(x => x.ToString())), + string.Join(", ", groupArray.Select(x => x.ToString()))); + + var memberIds = string.Join(", ", groupArray.Union(userArray)); + if (string.IsNullOrEmpty(memberIds)) return; + + var db = Db(); + await using (db.ConfigureAwait(false)) + await db.Database.ExecuteSqlRawAsync( + $"DELETE FROM \"EFMemberships\" WHERE \"GroupId\" = {{0}} AND \"MemberId\" IN ({memberIds})", + new object[] { groupId }, cancel).ConfigureAwait(false); + op.Successful = true; + } + + // ===================================================================== Security Activities + + [Obsolete("Use async version instead.")] + public int GetLastSecurityActivityId() + { + return GetLastSecurityActivityIdAsync(CancellationToken.None).GetAwaiter().GetResult(); + } + + public async Task GetLastSecurityActivityIdAsync(CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: GetLastSecurityActivityId()"); + + int result; + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var lastMsg = await db.EFMessages + .OrderByDescending(e => e.Id) + .FirstOrDefaultAsync(cancel).ConfigureAwait(false); + result = lastMsg?.Id ?? 
0; + } + op.Successful = true; + return result; + } + + [Obsolete("Use async version instead.")] + public int[] GetUnprocessedActivityIds() + { + return GetUnprocessedActivityIdsAsync(CancellationToken.None).GetAwaiter().GetResult(); + } + + public async Task GetUnprocessedActivityIdsAsync(CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: GetUnprocessedActivityIds()"); + + int[] result; + var db = Db(); + await using (db.ConfigureAwait(false)) + { + // Get unprocessed activity IDs + the current sequence value + var unprocessed = await db.EFMessages + .Where(x => x.ExecutionState != "Done") + .Select(x => x.Id) + .ToArrayAsync(cancel).ConfigureAwait(false); + + var maxId = await db.EFMessages + .OrderByDescending(x => x.Id) + .Select(x => x.Id) + .FirstOrDefaultAsync(cancel).ConfigureAwait(false); + + result = unprocessed.Append(maxId).OrderBy(x => x).ToArray(); + } + op.Successful = true; + return result; + } + + [Obsolete("Use async version instead.")] + public SecurityActivity[] LoadSecurityActivities(int from, int to, int count, bool executingUnprocessedActivities) + { + return LoadSecurityActivitiesAsync(from, to, count, executingUnprocessedActivities, CancellationToken.None) + .GetAwaiter().GetResult(); + } + + public async Task LoadSecurityActivitiesAsync(int from, int to, int count, + bool executingUnprocessedActivities, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: LoadSecurityActivities(from: {0}, to: {1}, count: {2})", from, to, count); + + var result = await RetryAsync(async () => + { + var activities = new List(); + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var items = await db.EFMessages + .Where(x => x.Id >= from && x.Id <= to) + .OrderBy(x => x.Id) + .Take(count) + .ToArrayAsync(cancel).ConfigureAwait(false); + + foreach (var item in items) + { + var activity = 
ActivitySerializer.DeserializeActivity(item.Body); + if (activity == null) continue; + activity.Id = item.Id; + activity.FromDatabase = true; + activity.IsUnprocessedActivity = executingUnprocessedActivities; + activities.Add(activity); + } + } + return activities.ToArray(); + }, cancel).ConfigureAwait(false); + op.Successful = true; + return result; + } + + [Obsolete("Use async version instead.")] + public SecurityActivity[] LoadSecurityActivities(int[] gaps, bool executingUnprocessedActivities) + { + return LoadSecurityActivitiesAsync(gaps, executingUnprocessedActivities, CancellationToken.None) + .GetAwaiter().GetResult(); + } + + public async Task LoadSecurityActivitiesAsync(int[] gaps, + bool executingUnprocessedActivities, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: LoadSecurityActivities(gaps: [{0}])", + gaps.Length > 20 ? $"count: {gaps.Length}" : string.Join(", ", gaps.Select(x => x.ToString()))); + + var result = await RetryAsync(async () => + { + var activities = new List(); + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var items = await db.EFMessages + .Where(x => gaps.Contains(x.Id)) + .OrderBy(x => x.Id) + .ToArrayAsync(cancel).ConfigureAwait(false); + + foreach (var item in items) + { + var activity = ActivitySerializer.DeserializeActivity(item.Body); + if (activity == null) continue; + activity.Id = item.Id; + activity.FromDatabase = true; + activity.IsUnprocessedActivity = executingUnprocessedActivities; + activities.Add(activity); + } + } + return activities.ToArray(); + }, cancel).ConfigureAwait(false); + op.Successful = true; + return result; + } + + [Obsolete("Use async version instead.")] + public SecurityActivity LoadSecurityActivity(int id) + { + return LoadSecurityActivityAsync(id, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async Task LoadSecurityActivityAsync(int id, CancellationToken cancel) + { + using var op = 
SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: LoadSecurityActivity(id: {0})", id); + + SecurityActivity result = null; + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var msg = await db.EFMessages + .FirstOrDefaultAsync(x => x.Id == id, cancel).ConfigureAwait(false); + + if (msg != null) + { + result = ActivitySerializer.DeserializeActivity(msg.Body); + result.Id = msg.Id; + } + } + op.Successful = true; + return result; + } + + [Obsolete("Use async version instead.")] + public int SaveSecurityActivity(SecurityActivity activity, out int bodySize) + { + var result = SaveSecurityActivityAsync(activity, CancellationToken.None).GetAwaiter().GetResult(); + bodySize = result.BodySize; + return result.ActivityId; + } + + public async Task SaveSecurityActivityAsync(SecurityActivity activity, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: SaveSecurityActivity. Id: {0}, TypeName: {1}", + activity.Id, activity.TypeName); + + var body = ActivitySerializer.SerializeActivity(activity); + + var result = await RetryAsync(async () => + { + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var dbEntry = db.EFMessages.Add(new PgSqlEFMessage + { + ExecutionState = "Wait", + SavedBy = _messageSenderManager.InstanceId, + SavedAt = DateTime.UtcNow, + Body = body + }); + await db.SaveChangesAsync(cancel).ConfigureAwait(false); + return new SaveSecurityActivityResult { ActivityId = dbEntry.Entity.Id, BodySize = body.Length }; + } + }, cancel).ConfigureAwait(false); + + op.Successful = true; + return result; + } + + [Obsolete("Use async version instead.")] + public void CleanupSecurityActivities(int timeLimitInMinutes) + { + CleanupSecurityActivitiesAsync(timeLimitInMinutes, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task CleanupSecurityActivitiesAsync(int timeLimitInMinutes, CancellationToken cancel) + { + using var 
op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: CleanupSecurityActivities(timeLimitInMinutes: {0})", timeLimitInMinutes); + + var db = Db(); + await using (db.ConfigureAwait(false)) + await db.Database.ExecuteSqlRawAsync( + $"DELETE FROM \"EFMessages\" WHERE \"SavedAt\" < NOW() AT TIME ZONE 'UTC' - ('{timeLimitInMinutes} minutes')::INTERVAL AND \"ExecutionState\" = 'Done'", + cancel).ConfigureAwait(false); + op.Successful = true; + } + + // ===================================================================== Activity Execution Locks + + [Obsolete("Use async version instead.")] + public SecurityActivityExecutionLock AcquireSecurityActivityExecutionLock( + SecurityActivity securityActivity, int timeoutInSeconds) + { + return AcquireSecurityActivityExecutionLockAsync(securityActivity, timeoutInSeconds, CancellationToken.None) + .GetAwaiter().GetResult(); + } + + public async Task AcquireSecurityActivityExecutionLockAsync( + SecurityActivity securityActivity, int timeoutInSeconds, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: AcquireSecurityActivityExecutionLock. Id: {0}, TypeName: {1}, timeoutInSeconds: {2}", + securityActivity.Id, securityActivity.TypeName, timeoutInSeconds); + + var maxTime = timeoutInSeconds == int.MaxValue + ? 
DateTime.MaxValue + : DateTime.UtcNow.AddSeconds(timeoutInSeconds); + + while (DateTime.UtcNow < maxTime) + { + var lockResult = await RetryAsync(async () => + { + var db = Db(); + await using (db.ConfigureAwait(false)) + { + // Try to acquire the lock + var affected = await db.Database.ExecuteSqlRawAsync( + "UPDATE \"EFMessages\" SET \"ExecutionState\" = {0}, \"LockedBy\" = {1}, \"LockedAt\" = NOW() AT TIME ZONE 'UTC' " + + "WHERE \"Id\" = {2} AND (\"ExecutionState\" = 'Wait' OR (\"ExecutionState\" = 'Executing' AND \"LockedAt\" < NOW() AT TIME ZONE 'UTC' - ({3} || ' seconds')::INTERVAL))", + new object[] { "Executing", _messageSenderManager.InstanceId ?? "", securityActivity.Id, timeoutInSeconds.ToString() }, + cancel).ConfigureAwait(false); + + if (affected > 0) + return "LockedForYou"; + + // Check current state + var msg = await db.EFMessages + .Where(x => x.Id == securityActivity.Id) + .Select(x => x.ExecutionState) + .FirstOrDefaultAsync(cancel).ConfigureAwait(false); + + return msg ?? 
""; + } + }, cancel).ConfigureAwait(false); + + switch (lockResult) + { + case "LockedForYou": + op.Successful = true; + return new SecurityActivityExecutionLock(securityActivity, this, true); + case "Executing": + case "Done": + op.Successful = true; + return new SecurityActivityExecutionLock(securityActivity, this, false); + } + } + + op.Successful = true; + throw new SecurityActivityTimeoutException( + $"Waiting for a SecurityActivityExecutionLock timed out: #{securityActivity.Id}/{securityActivity.TypeName}"); + } + + [Obsolete("Use async version instead.")] + public void RefreshSecurityActivityExecutionLock(SecurityActivity securityActivity) + { + RefreshSecurityActivityExecutionLockAsync(securityActivity, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task RefreshSecurityActivityExecutionLockAsync(SecurityActivity securityActivity, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: RefreshSecurityActivityExecutionLock. Id: {0}", securityActivity.Id); + + var db = Db(); + await using (db.ConfigureAwait(false)) + await db.Database.ExecuteSqlRawAsync( + "UPDATE \"EFMessages\" SET \"LockedAt\" = NOW() AT TIME ZONE 'UTC' WHERE \"Id\" = {0}", + new object[] { securityActivity.Id }, cancel).ConfigureAwait(false); + op.Successful = true; + } + + [Obsolete("Use async version instead.")] + public void ReleaseSecurityActivityExecutionLock(SecurityActivity securityActivity) + { + ReleaseSecurityActivityExecutionLockAsync(securityActivity, CancellationToken.None).GetAwaiter().GetResult(); + } + + public async System.Threading.Tasks.Task ReleaseSecurityActivityExecutionLockAsync(SecurityActivity securityActivity, CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: ReleaseSecurityActivityExecutionLock. 
Id: {0}", securityActivity.Id); + + var db = Db(); + await using (db.ConfigureAwait(false)) + await db.Database.ExecuteSqlRawAsync( + "UPDATE \"EFMessages\" SET \"ExecutionState\" = 'Done' WHERE \"Id\" = {0}", + new object[] { securityActivity.Id }, cancel).ConfigureAwait(false); + op.Successful = true; + } + + // ===================================================================== Consistency Check + + [Obsolete("Use async version instead.")] + public IEnumerable GetMembershipForConsistencyCheck() + { + return GetMembershipForConsistencyCheckAsync(CancellationToken.None).GetAwaiter().GetResult(); + } + + public async Task> GetMembershipForConsistencyCheckAsync(CancellationToken cancel) + { + using var op = SnTrace.SecurityDatabase.StartOperation( + "PgSqlSecurityDataProvider: GetMembershipForConsistencyCheck()"); + + long[] result; + var db = Db(); + await using (db.ConfigureAwait(false)) + { + var dbResult = await db.EFMemberships.ToArrayAsync(cancel).ConfigureAwait(false); + result = dbResult.Select(m => (Convert.ToInt64(m.GroupId) << 32) + m.MemberId).ToArray(); + } + op.Successful = true; + return result; + } + + // ===================================================================== Tools + + private static SecurityGroup EnsureGroup(int groupId, Dictionary groups) + { + if (groups.TryGetValue(groupId, out var group)) + return group; + group = new SecurityGroup(groupId); + groups.Add(group.Id, group); + return group; + } + + private static bool RetriableException(Exception ex) + { + return (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) || + (ex is Npgsql.NpgsqlException && ex.Message.Contains("connection")); + } + + internal System.Threading.Tasks.Task RetryAsync(Func action, CancellationToken cancel) + { + return RetryAsync(async () => + { + await action().ConfigureAwait(false); + return null; + }, cancel); + } + + internal Task RetryAsync(Func> action, CancellationToken cancel) + { + return _retrier.RetryAsync(action, 
+ shouldRetryOnError: (ex, _) => RetriableException(ex), + onAfterLastIteration: (_, ex, i) => + { + SnTrace.Security.WriteError( + $"Security data layer error: {ex.Message}. Retry cycle ended after {i} iterations."); + throw new InvalidOperationException("Security data layer timeout occurred.", ex); + }, + cancel: cancel); + } + } +} diff --git a/src/ContentRepository.PostgreSql/Security/PgSqlSecurityEntities.cs b/src/ContentRepository.PostgreSql/Security/PgSqlSecurityEntities.cs new file mode 100644 index 000000000..fd94ad3e1 --- /dev/null +++ b/src/ContentRepository.PostgreSql/Security/PgSqlSecurityEntities.cs @@ -0,0 +1,109 @@ +using System.Collections.Generic; +using System.ComponentModel.DataAnnotations; +using System.ComponentModel.DataAnnotations.Schema; +using Microsoft.EntityFrameworkCore; + +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient.Security +{ + /// + /// EF entity for the "EFEntities" table (security entities). + /// Mirrors the EFEntity from EFCSecurityStore but without SQL Server dependencies. + /// + [Table("EFEntities")] + public class PgSqlEFEntity + { + [Key] + [DatabaseGenerated(DatabaseGeneratedOption.None)] + public int Id { get; set; } + + public int? OwnerId { get; set; } + public int? ParentId { get; set; } + public bool IsInherited { get; set; } + + [ForeignKey("ParentId")] + public PgSqlEFEntity Parent { get; set; } + + public ICollection Children { get; set; } + public ICollection EFEntries { get; set; } + } + + /// + /// EF entity for the "EFEntries" table (ACE entries / permission entries). 
+ /// + [Table("EFEntries")] + public class PgSqlEFEntry + { + public int EFEntityId { get; set; } + public int EntryType { get; set; } + public int IdentityId { get; set; } + public bool LocalOnly { get; set; } + public long AllowBits { get; set; } + public long DenyBits { get; set; } + + [ForeignKey("EFEntityId")] + public PgSqlEFEntity EFEntity { get; set; } + } + + /// + /// EF entity for the "EFMemberships" table (group memberships). + /// + [Table("EFMemberships")] + public class PgSqlEFMembership + { + public int GroupId { get; set; } + public int MemberId { get; set; } + public bool IsUser { get; set; } + } + + /// + /// EF entity for the "EFMessages" table (security activity messages). + /// + [Table("EFMessages")] + public class PgSqlEFMessage + { + [Key] + [DatabaseGenerated(DatabaseGeneratedOption.Identity)] + public int Id { get; set; } + + [MaxLength(400)] + public string SavedBy { get; set; } + + public System.DateTime SavedAt { get; set; } + + [MaxLength(20)] + public string ExecutionState { get; set; } + + [MaxLength(400)] + public string LockedBy { get; set; } + + public System.DateTime? LockedAt { get; set; } + + public byte[] Body { get; set; } + } + + // Helper types for raw SQL queries + + [Keyless] + public class PgSqlIntItem + { + public int Id { get; set; } + public int Value { get; set; } + } + + [Keyless] + public class PgSqlStringItem + { + public int Id { get; set; } + public string Value { get; set; } + } + + [Keyless] + public class PgSqlStoredSecurityEntity + { + public int Id { get; set; } + public int? nullableOwnerId { get; set; } + public int? 
nullableParentId { get; set; } + public bool IsInherited { get; set; } + public bool HasExplicitEntry { get; set; } + } +} diff --git a/src/ContentRepository.PostgreSql/Security/PgSqlSecurityStorage.cs b/src/ContentRepository.PostgreSql/Security/PgSqlSecurityStorage.cs new file mode 100644 index 000000000..a3a0abcfd --- /dev/null +++ b/src/ContentRepository.PostgreSql/Security/PgSqlSecurityStorage.cs @@ -0,0 +1,64 @@ +using Microsoft.EntityFrameworkCore; + +namespace SenseNet.ContentRepository.Storage.Data.PgSqlClient.Security +{ + /// + /// PostgreSQL-compatible DbContext for sensenet security data. + /// Replaces the SQL Server-only SecurityStorage from EFCSecurityStore. + /// + internal class PgSqlSecurityStorage : DbContext + { + private readonly string _connectionString; + private readonly int _commandTimeout; + + public PgSqlSecurityStorage(string connectionString, int commandTimeout = 120) + { + _connectionString = connectionString; + _commandTimeout = commandTimeout; + } + + protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder) + { + optionsBuilder.UseNpgsql(_connectionString, options => + { + options.CommandTimeout(_commandTimeout); + }); + base.OnConfiguring(optionsBuilder); + } + + public DbSet EFEntities { get; set; } + public DbSet EFEntries { get; set; } + public DbSet EFMemberships { get; set; } + public DbSet EFMessages { get; set; } + + // Helper entity sets for raw SQL queries + internal DbSet IntSet { get; set; } + internal DbSet StringSet { get; set; } + internal DbSet StoredSecurityEntitySet { get; set; } + + protected override void OnModelCreating(ModelBuilder modelBuilder) + { + modelBuilder.Entity() + .HasOne(e => e.Parent) + .WithMany(e => e.Children) + .IsRequired(false) + .HasForeignKey(e => e.ParentId) + .OnDelete(DeleteBehavior.ClientSetNull); + + modelBuilder.Entity() + .HasOne(e => e.EFEntity) + .WithMany(f => f.EFEntries) + .IsRequired() + .HasForeignKey(e => e.EFEntityId) + 
.OnDelete(DeleteBehavior.ClientSetNull); + + modelBuilder.Entity() + .HasKey(a => new { a.GroupId, a.MemberId }); + + modelBuilder.Entity() + .HasKey(a => new { a.EFEntityId, a.EntryType, a.IdentityId, a.LocalOnly }); + + base.OnModelCreating(modelBuilder); + } + } +} diff --git a/src/ContentRepository.PostgreSql/SenseNet.ContentRepository.PostgreSql.csproj b/src/ContentRepository.PostgreSql/SenseNet.ContentRepository.PostgreSql.csproj new file mode 100644 index 000000000..ab512d726 --- /dev/null +++ b/src/ContentRepository.PostgreSql/SenseNet.ContentRepository.PostgreSql.csproj @@ -0,0 +1,25 @@ + + + + net8.0 + SenseNet.ContentRepository.PostgreSql + SenseNet.ContentRepository + + + + + + + + + + + + + + + + + + + diff --git a/src/ContentRepository/Packaging/Tools/ContentTypeBuilder.cs b/src/ContentRepository/Packaging/Tools/ContentTypeBuilder.cs index aa00e724f..b8cf79ebe 100644 --- a/src/ContentRepository/Packaging/Tools/ContentTypeBuilder.cs +++ b/src/ContentRepository/Packaging/Tools/ContentTypeBuilder.cs @@ -596,7 +596,7 @@ private void ChangeFieldTypeInternal(string fieldName, string targetType) // Iterate through the types again in reverse order (root --> leaves) and register // the field with the new type. - foreach (var contentTypeName in contentTypeNames.Reverse()) + foreach (var contentTypeName in ((IEnumerable)contentTypeNames).Reverse()) { var contentType = ContentType.GetByName(contentTypeName); var ctdXml = LoadContentTypeXmlDocument(contentType); diff --git a/src/ContentRepository/Repository.cs b/src/ContentRepository/Repository.cs index dccc053f8..fc2c64a2d 100644 --- a/src/ContentRepository/Repository.cs +++ b/src/ContentRepository/Repository.cs @@ -151,6 +151,37 @@ private static void EnsureApiKeyForAdmin(IServiceProvider services, ILogger logg { logger?.LogInformation("Check apikey for admin..."); var akm = services.GetRequiredService(); + + // Check for a configured (static) API key from configuration. 
+ // IConfiguration automatically maps env vars (sensenet__repository__Authentication__ApiKey) + // and appsettings.json into the unified key format. + var config = services.GetService(); + var configuredApiKey = config?["sensenet:repository:Authentication:ApiKey"]; + + if (!string.IsNullOrEmpty(configuredApiKey)) + { + // Ensure the configured API key exists in the database + var existingToken = AccessTokenVault.GetTokenAsync(configuredApiKey, 0, "apikey", CancellationToken.None) + .GetAwaiter().GetResult(); + if (existingToken == null) + { + AccessTokenVault.CreateTokenAsync( + Identifiers.AdministratorUserId, + TimeSpan.FromDays(365 * 100), // effectively never expires + 0, + "apikey", + configuredApiKey, + CancellationToken.None) + .GetAwaiter().GetResult(); + logger?.LogInformation("Configured API key inserted into database for admin."); + } + else + { + logger?.LogInformation("Configured API key already exists in database."); + } + } + + // Also ensure at least one API key exists (random key as fallback) var apiKey = akm.GetApiKeysByUserAsync(Identifiers.AdministratorUserId, CancellationToken.None) .GetAwaiter().GetResult() .Where(a => a.ExpirationDate > DateTime.UtcNow) diff --git a/src/SenseNet.sln b/src/SenseNet.sln index 3288cefe7..b761dcbae 100644 --- a/src/SenseNet.sln +++ b/src/SenseNet.sln @@ -1,814 +1,840 @@ -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 17 -VisualStudioVersion = 17.0.32014.148 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Tools", "Tools", "{875EF569-4684-473D-A2D4-A35B20B4A07C}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Core", "Core", "{2997D17C-A736-43E5-B3DD-11D11AC7DF17}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.ContentRepository", "ContentRepository\SenseNet.ContentRepository.csproj", "{786E6165-CA02-45A9-BF58-207A45D7D6DF}" -EndProject 
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Storage", "Storage\SenseNet.Storage.csproj", "{5DB4DDBA-81F6-4D81-943A-18F3178B3355}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Common", "Common\SenseNet.Common.csproj", "{A453E920-29C0-45CD-984C-0D8E3631B1E3}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{6D908666-E47F-47A7-A7E5-C696B9CD517A}" - ProjectSection(SolutionItems) = preProject - nuget\snadmin\install-services-core\manifest.xml = nuget\snadmin\install-services-core\manifest.xml - nuget\readme.txt = nuget\readme.txt - EndProjectSection -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Tests", "Tests", "{C68D256D-7D40-4E33-8A2B-B1625538B138}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Search", "Search\SenseNet.Search.csproj", "{0279705B-779D-485D-86B9-F7AB3DD1F2C3}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.ContentRepository.Tests", "Tests\SenseNet.ContentRepository.Tests\SenseNet.ContentRepository.Tests.csproj", "{1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Packaging.Tests", "Tests\SenseNet.Packaging.Tests\SenseNet.Packaging.Tests.csproj", "{6B711835-9172-4F07-9FC3-BA79C7DFA916}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Search.Tests", "Tests\SenseNet.Search.Tests\SenseNet.Search.Tests.csproj", "{43E6E75C-2410-464D-B7D6-36F8ACF85A17}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.BlobStorage", "BlobStorage\SenseNet.BlobStorage.csproj", "{C250C071-6ACD-42E0-9FFC-63283AFB8C6C}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.OData", "OData\SenseNet.OData.csproj", "{EF5A2EB2-723F-4CAF-9950-954421E3B0A0}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.ODataTests", 
"Tests\SenseNet.ODataTests\SenseNet.ODataTests.csproj", "{B2F72AB7-36C5-4E95-839F-0DD340AC8C36}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.ContentRepository.InMemory", "ContentRepository.InMemory\SenseNet.ContentRepository.InMemory.csproj", "{DF893D4B-BB76-4A5C-AB92-B1A8B283577F}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Services.Core", "Services.Core\SenseNet.Services.Core.csproj", "{2C97F180-2F80-49A5-9C8B-D114E4CCC819}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Services.Core.Install", "Services.Core.Install\SenseNet.Services.Core.Install.csproj", "{E1FD0E29-39F8-40A1-89D7-F513DD783089}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnConsoleInstaller", "Tools\SnConsoleInstaller\SnConsoleInstaller.csproj", "{0A1EF09D-E36E-46A8-A734-844C09323873}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Services.Core.Tests", "Tests\SenseNet.Services.Core.Tests\SenseNet.Services.Core.Tests.csproj", "{D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnInitialDataGenerator", "Tools\SnInitialDataGenerator\SnInitialDataGenerator.csproj", "{7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnInitialDataGenerator.Tests", "Tests\SnInitialDataGenerator.Tests\SnInitialDataGenerator.Tests.csproj", "{2B625502-0E67-46E0-904D-CD11B7B2DE93}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Services.Wopi", "Services.Wopi\SenseNet.Services.Wopi.csproj", "{C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Services.Wopi.Tests", "Tests\SenseNet.Services.Wopi.Tests\SenseNet.Services.Wopi.Tests.csproj", "{E30A5CB4-33A1-4B54-9526-F671DAA44821}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "WebApps", "WebApps", 
"{61F11B98-137D-402E-AAF9-DA329D109B4B}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.InMem.Admin", "WebApps\SnWebApplication.Api.InMem.Admin\SnWebApplication.Api.InMem.Admin.csproj", "{9031FD6A-889D-431E-AE12-1FCB8A016F57}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.InMem.TokenAuth", "WebApps\SnWebApplication.Api.InMem.TokenAuth\SnWebApplication.Api.InMem.TokenAuth.csproj", "{BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.Sql.Admin", "WebApps\SnWebApplication.Api.Sql.Admin\SnWebApplication.Api.Sql.Admin.csproj", "{FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.Sql.TokenAuth", "WebApps\SnWebApplication.Api.Sql.TokenAuth\SnWebApplication.Api.Sql.TokenAuth.csproj", "{5277839B-309F-4EC4-B164-A2D860BA413C}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.MiddlewareTests", "Tests\SenseNet.MiddlewareTests\SenseNet.MiddlewareTests.csproj", "{7FDD4077-1372-401C-A698-96395DF0C8DB}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Tests.Core", "Tests\SenseNet.Tests.Core\SenseNet.Tests.Core.csproj", "{261098F3-F277-46E6-832C-5AAA872CB525}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Tests.Core.Tests", "Tests\SenseNet.Tests.Core.Tests\SenseNet.Tests.Core.Tests.csproj", "{988FFB5D-4485-475B-87FE-5D651D641CD7}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.IntegrationTests", "Tests\SenseNet.IntegrationTests\SenseNet.IntegrationTests.csproj", "{E53F97F9-A059-4252-8C09-8C6B63678F24}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.WebHooks.Common", "WebHooks.Common\SenseNet.WebHooks.Common.csproj", "{319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = 
"SenseNet.WebHooks.Tests", "Tests\SenseNet.WebHooks.Tests\SenseNet.WebHooks.Tests.csproj", "{C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.WebHooks", "WebHooks\SenseNet.WebHooks.csproj", "{6F17E01E-8BEB-411B-84FD-316D7A29F128}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "WebHookHandler", "WebApps\WebHookHandler\WebHookHandler.csproj", "{ABDCC209-B20B-4027-A374-1476F5FF5F48}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Abstractions", "Abstractions\SenseNet.Abstractions.csproj", "{EC55EA80-E068-440B-A357-89D4CA1BECB8}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.Sql.SearchService.TokenAuth", "WebApps\SnWebApplication.Api.Sql.SearchService.TokenAuth\SnWebApplication.Api.Sql.SearchService.TokenAuth.csproj", "{1136196B-B909-47FE-9D70-1788402DF4D0}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.Sql.SearchService.Admin", "WebApps\SnWebApplication.Api.Sql.SearchService.Admin\SnWebApplication.Api.Sql.SearchService.Admin.csproj", "{ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "WebAppTests", "Tests\WebAppTests\WebAppTests.csproj", "{124EF02B-26DF-4410-9232-812B0D14526E}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.ContentRepository.MsSql", "ContentRepository.MsSql\SenseNet.ContentRepository.MsSql.csproj", "{415814BC-A9B9-4742-B716-6358E2913565}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.OpenApi", "OpenApi\SenseNet.OpenApi.csproj", "{170FCE1A-5077-4518-9444-8B4ADCE8A8FC}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.IntegrationTests.MsSql", "Tests\SenseNet.IntegrationTests.MsSql\SenseNet.IntegrationTests.MsSql.csproj", "{9E9B0B82-46B4-4A80-918F-32E855406DBC}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = 
"SnWebApplication.Api.Sql.TokenAuth.Preview", "WebApps\SnWebApplication.Api.Sql.TokenAuth.Preview\SnWebApplication.Api.Sql.TokenAuth.Preview.csproj", "{4C6517FA-E734-4090-BCE3-BC50FBC632B8}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.Sql.SearchService.TokenAuth.Preview", "WebApps\SnWebApplication.Api.Sql.SearchService.TokenAuth.Preview\SnWebApplication.Api.Sql.SearchService.TokenAuth.Preview.csproj", "{8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SnWebApplication.Api.Sql.TokenAuth.NLB", "WebApps\SnWebApplication.Api.Sql.TokenAuth.NLB\SnWebApplication.Api.Sql.TokenAuth.NLB.csproj", "{F91BACBA-937D-4282-CC0C-098383E793DC}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Debug|Mixed Platforms = Debug|Mixed Platforms - Debug|x64 = Debug|x64 - Debug|x86 = Debug|x86 - Release|Any CPU = Release|Any CPU - Release|Mixed Platforms = Release|Mixed Platforms - Release|x64 = Release|x64 - Release|x86 = Release|x86 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|Any CPU.Build.0 = Debug|Any CPU - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|x64.ActiveCfg = Debug|Any CPU - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|x86.ActiveCfg = Debug|Any CPU - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|Any CPU.ActiveCfg = Release|Any CPU - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|Any CPU.Build.0 = Release|Any CPU - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|Mixed Platforms.Build.0 = 
Release|Any CPU - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|x64.ActiveCfg = Release|Any CPU - {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|x86.ActiveCfg = Release|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|Any CPU.Build.0 = Debug|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|x64.ActiveCfg = Debug|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|x86.ActiveCfg = Debug|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|Any CPU.ActiveCfg = Release|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|Any CPU.Build.0 = Release|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|x64.ActiveCfg = Release|Any CPU - {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|x86.ActiveCfg = Release|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|Any CPU.Build.0 = Debug|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|x64.ActiveCfg = Debug|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|x64.Build.0 = Debug|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|x86.ActiveCfg = Debug|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|x86.Build.0 = Debug|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|Any CPU.ActiveCfg = Release|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|Any CPU.Build.0 = Release|Any CPU - 
{A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|x64.ActiveCfg = Release|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|x64.Build.0 = Release|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|x86.ActiveCfg = Release|Any CPU - {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|x86.Build.0 = Release|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|Any CPU.Build.0 = Debug|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|x64.ActiveCfg = Debug|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|x64.Build.0 = Debug|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|x86.ActiveCfg = Debug|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|x86.Build.0 = Debug|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|Any CPU.ActiveCfg = Release|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|Any CPU.Build.0 = Release|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|x64.ActiveCfg = Release|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|x64.Build.0 = Release|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|x86.ActiveCfg = Release|Any CPU - {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|x86.Build.0 = Release|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|x64.ActiveCfg = Debug|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|x64.Build.0 = Debug|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|x86.ActiveCfg = Debug|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|x86.Build.0 = Debug|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|Any CPU.ActiveCfg = Release|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|Any CPU.Build.0 = Release|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|x64.ActiveCfg = Release|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|x64.Build.0 = Release|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|x86.ActiveCfg = Release|Any CPU - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|x86.Build.0 = Release|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|Any CPU.Build.0 = Debug|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|x64.ActiveCfg = Debug|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|x64.Build.0 = Debug|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|x86.ActiveCfg = Debug|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|x86.Build.0 = Debug|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|Any CPU.ActiveCfg = Release|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|Any CPU.Build.0 = Release|Any CPU - 
{6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|x64.ActiveCfg = Release|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|x64.Build.0 = Release|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|x86.ActiveCfg = Release|Any CPU - {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|x86.Build.0 = Release|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|Any CPU.Build.0 = Debug|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|x64.ActiveCfg = Debug|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|x64.Build.0 = Debug|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|x86.ActiveCfg = Debug|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|x86.Build.0 = Debug|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|Any CPU.ActiveCfg = Release|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|Any CPU.Build.0 = Release|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|x64.ActiveCfg = Release|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|x64.Build.0 = Release|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|x86.ActiveCfg = Release|Any CPU - {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|x86.Build.0 = Release|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|x64.ActiveCfg = Debug|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|x64.Build.0 = Debug|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|x86.ActiveCfg = Debug|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|x86.Build.0 = Debug|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|Any CPU.ActiveCfg = Release|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|Any CPU.Build.0 = Release|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|x64.ActiveCfg = Release|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|x64.Build.0 = Release|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|x86.ActiveCfg = Release|Any CPU - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|x86.Build.0 = Release|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|Any CPU.Build.0 = Debug|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|x64.ActiveCfg = Debug|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|x64.Build.0 = Debug|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|x86.ActiveCfg = Debug|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|x86.Build.0 = Debug|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|Any CPU.ActiveCfg = Release|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|Any CPU.Build.0 = Release|Any CPU - 
{EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|x64.ActiveCfg = Release|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|x64.Build.0 = Release|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|x86.ActiveCfg = Release|Any CPU - {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|x86.Build.0 = Release|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|Any CPU.Build.0 = Debug|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|x64.ActiveCfg = Debug|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|x64.Build.0 = Debug|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|x86.ActiveCfg = Debug|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|x86.Build.0 = Debug|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|Any CPU.ActiveCfg = Release|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|Any CPU.Build.0 = Release|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|x64.ActiveCfg = Release|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|x64.Build.0 = Release|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|x86.ActiveCfg = Release|Any CPU - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|x86.Build.0 = Release|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|x64.ActiveCfg = Debug|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|x64.Build.0 = Debug|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|x86.ActiveCfg = Debug|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|x86.Build.0 = Debug|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|Any CPU.ActiveCfg = Release|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|Any CPU.Build.0 = Release|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|x64.ActiveCfg = Release|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|x64.Build.0 = Release|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|x86.ActiveCfg = Release|Any CPU - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|x86.Build.0 = Release|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|Any CPU.Build.0 = Debug|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|x64.ActiveCfg = Debug|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|x64.Build.0 = Debug|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|x86.ActiveCfg = Debug|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|x86.Build.0 = Debug|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|Any CPU.ActiveCfg = Release|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|Any CPU.Build.0 = Release|Any CPU - 
{2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|x64.ActiveCfg = Release|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|x64.Build.0 = Release|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|x86.ActiveCfg = Release|Any CPU - {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|x86.Build.0 = Release|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|Any CPU.Build.0 = Debug|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|x64.ActiveCfg = Debug|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|x64.Build.0 = Debug|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|x86.ActiveCfg = Debug|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|x86.Build.0 = Debug|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|Any CPU.ActiveCfg = Release|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|Any CPU.Build.0 = Release|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|x64.ActiveCfg = Release|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|x64.Build.0 = Release|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|x86.ActiveCfg = Release|Any CPU - {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|x86.Build.0 = Release|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|x64.ActiveCfg = Debug|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|x64.Build.0 = Debug|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|x86.ActiveCfg = Debug|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|x86.Build.0 = Debug|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|Any CPU.ActiveCfg = Release|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|Any CPU.Build.0 = Release|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|x64.ActiveCfg = Release|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|x64.Build.0 = Release|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|x86.ActiveCfg = Release|Any CPU - {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|x86.Build.0 = Release|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|Any CPU.Build.0 = Debug|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|x64.ActiveCfg = Debug|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|x64.Build.0 = Debug|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|x86.ActiveCfg = Debug|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|x86.Build.0 = Debug|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|Any CPU.ActiveCfg = Release|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|Any CPU.Build.0 = Release|Any CPU - 
{D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|x64.ActiveCfg = Release|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|x64.Build.0 = Release|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|x86.ActiveCfg = Release|Any CPU - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|x86.Build.0 = Release|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|Any CPU.Build.0 = Debug|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|x64.ActiveCfg = Debug|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|x64.Build.0 = Debug|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|x86.ActiveCfg = Debug|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|x86.Build.0 = Debug|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|Any CPU.ActiveCfg = Release|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|Any CPU.Build.0 = Release|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|x64.ActiveCfg = Release|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|x64.Build.0 = Release|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|x86.ActiveCfg = Release|Any CPU - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|x86.Build.0 = Release|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|x64.ActiveCfg = Debug|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|x64.Build.0 = Debug|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|x86.ActiveCfg = Debug|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|x86.Build.0 = Debug|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|Any CPU.ActiveCfg = Release|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|Any CPU.Build.0 = Release|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|x64.ActiveCfg = Release|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|x64.Build.0 = Release|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|x86.ActiveCfg = Release|Any CPU - {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|x86.Build.0 = Release|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|Any CPU.Build.0 = Debug|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|x64.ActiveCfg = Debug|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|x64.Build.0 = Debug|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|x86.ActiveCfg = Debug|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|x86.Build.0 = Debug|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|Any CPU.ActiveCfg = Release|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|Any CPU.Build.0 = Release|Any CPU - 
{C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|x64.ActiveCfg = Release|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|x64.Build.0 = Release|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|x86.ActiveCfg = Release|Any CPU - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|x86.Build.0 = Release|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|Any CPU.Build.0 = Debug|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|x64.ActiveCfg = Debug|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|x64.Build.0 = Debug|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|x86.ActiveCfg = Debug|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|x86.Build.0 = Debug|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|Any CPU.ActiveCfg = Release|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|Any CPU.Build.0 = Release|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|x64.ActiveCfg = Release|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|x64.Build.0 = Release|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|x86.ActiveCfg = Release|Any CPU - {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|x86.Build.0 = Release|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|x64.ActiveCfg = Debug|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|x64.Build.0 = Debug|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|x86.ActiveCfg = Debug|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|x86.Build.0 = Debug|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|Any CPU.ActiveCfg = Release|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|Any CPU.Build.0 = Release|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|x64.ActiveCfg = Release|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|x64.Build.0 = Release|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|x86.ActiveCfg = Release|Any CPU - {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|x86.Build.0 = Release|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|Any CPU.Build.0 = Debug|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|x64.ActiveCfg = Debug|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|x64.Build.0 = Debug|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|x86.ActiveCfg = Debug|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|x86.Build.0 = Debug|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|Any CPU.ActiveCfg = Release|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|Any CPU.Build.0 = Release|Any CPU - 
{BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|x64.ActiveCfg = Release|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|x64.Build.0 = Release|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|x86.ActiveCfg = Release|Any CPU - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|x86.Build.0 = Release|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|Any CPU.Build.0 = Debug|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|x64.ActiveCfg = Debug|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|x64.Build.0 = Debug|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|x86.ActiveCfg = Debug|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|x86.Build.0 = Debug|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|Any CPU.ActiveCfg = Release|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|Any CPU.Build.0 = Release|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|x64.ActiveCfg = Release|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|x64.Build.0 = Release|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|x86.ActiveCfg = Release|Any CPU - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|x86.Build.0 = Release|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|x64.ActiveCfg = Debug|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|x64.Build.0 = Debug|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|x86.ActiveCfg = Debug|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|x86.Build.0 = Debug|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|Any CPU.ActiveCfg = Release|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|Any CPU.Build.0 = Release|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|x64.ActiveCfg = Release|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|x64.Build.0 = Release|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|x86.ActiveCfg = Release|Any CPU - {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|x86.Build.0 = Release|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|Any CPU.Build.0 = Debug|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|x64.ActiveCfg = Debug|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|x64.Build.0 = Debug|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|x86.ActiveCfg = Debug|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|x86.Build.0 = Debug|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|Any CPU.ActiveCfg = Release|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|Any CPU.Build.0 = Release|Any CPU - 
{7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|x64.ActiveCfg = Release|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|x64.Build.0 = Release|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|x86.ActiveCfg = Release|Any CPU - {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|x86.Build.0 = Release|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|Any CPU.Build.0 = Debug|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|x64.ActiveCfg = Debug|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|x64.Build.0 = Debug|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|x86.ActiveCfg = Debug|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|x86.Build.0 = Debug|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Release|Any CPU.ActiveCfg = Release|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Release|Any CPU.Build.0 = Release|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Release|x64.ActiveCfg = Release|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Release|x64.Build.0 = Release|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Release|x86.ActiveCfg = Release|Any CPU - {261098F3-F277-46E6-832C-5AAA872CB525}.Release|x86.Build.0 = Release|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|x64.ActiveCfg = Debug|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|x64.Build.0 = Debug|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|x86.ActiveCfg = Debug|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|x86.Build.0 = Debug|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|Any CPU.ActiveCfg = Release|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|Any CPU.Build.0 = Release|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|x64.ActiveCfg = Release|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|x64.Build.0 = Release|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|x86.ActiveCfg = Release|Any CPU - {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|x86.Build.0 = Release|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|Any CPU.Build.0 = Debug|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|x64.ActiveCfg = Debug|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|x64.Build.0 = Debug|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|x86.ActiveCfg = Debug|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|x86.Build.0 = Debug|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|Any CPU.ActiveCfg = Release|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|Any CPU.Build.0 = Release|Any CPU - 
{E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|x64.ActiveCfg = Release|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|x64.Build.0 = Release|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|x86.ActiveCfg = Release|Any CPU - {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|x86.Build.0 = Release|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|Any CPU.Build.0 = Debug|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|x64.ActiveCfg = Debug|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|x64.Build.0 = Debug|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|x86.ActiveCfg = Debug|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|x86.Build.0 = Debug|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|Any CPU.ActiveCfg = Release|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|Any CPU.Build.0 = Release|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|x64.ActiveCfg = Release|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|x64.Build.0 = Release|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|x86.ActiveCfg = Release|Any CPU - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|x86.Build.0 = Release|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|x64.ActiveCfg = Debug|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|x64.Build.0 = Debug|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|x86.ActiveCfg = Debug|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|x86.Build.0 = Debug|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|Any CPU.ActiveCfg = Release|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|Any CPU.Build.0 = Release|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|x64.ActiveCfg = Release|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|x64.Build.0 = Release|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|x86.ActiveCfg = Release|Any CPU - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|x86.Build.0 = Release|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|Any CPU.Build.0 = Debug|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|x64.ActiveCfg = Debug|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|x64.Build.0 = Debug|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|x86.ActiveCfg = Debug|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|x86.Build.0 = Debug|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|Any CPU.ActiveCfg = Release|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|Any CPU.Build.0 = Release|Any CPU - 
{6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|x64.ActiveCfg = Release|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|x64.Build.0 = Release|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|x86.ActiveCfg = Release|Any CPU - {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|x86.Build.0 = Release|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|Any CPU.Build.0 = Debug|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|x64.ActiveCfg = Debug|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|x64.Build.0 = Debug|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|x86.ActiveCfg = Debug|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|x86.Build.0 = Debug|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|Any CPU.ActiveCfg = Release|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|Any CPU.Build.0 = Release|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|x64.ActiveCfg = Release|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|x64.Build.0 = Release|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|x86.ActiveCfg = Release|Any CPU - {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|x86.Build.0 = Release|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|x64.ActiveCfg = Debug|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|x64.Build.0 = Debug|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|x86.ActiveCfg = Debug|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|x86.Build.0 = Debug|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|Any CPU.ActiveCfg = Release|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|Any CPU.Build.0 = Release|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|x64.ActiveCfg = Release|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|x64.Build.0 = Release|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|x86.ActiveCfg = Release|Any CPU - {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|x86.Build.0 = Release|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|Any CPU.Build.0 = Debug|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|x64.ActiveCfg = Debug|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|x64.Build.0 = Debug|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|x86.ActiveCfg = Debug|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|x86.Build.0 = Debug|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|Any CPU.ActiveCfg = Release|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|Any CPU.Build.0 = Release|Any CPU - 
{1136196B-B909-47FE-9D70-1788402DF4D0}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|x64.ActiveCfg = Release|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|x64.Build.0 = Release|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|x86.ActiveCfg = Release|Any CPU - {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|x86.Build.0 = Release|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|Any CPU.Build.0 = Debug|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|x64.ActiveCfg = Debug|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|x64.Build.0 = Debug|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|x86.ActiveCfg = Debug|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|x86.Build.0 = Debug|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|Any CPU.ActiveCfg = Release|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|Any CPU.Build.0 = Release|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|x64.ActiveCfg = Release|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|x64.Build.0 = Release|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|x86.ActiveCfg = Release|Any CPU - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|x86.Build.0 = Release|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{124EF02B-26DF-4410-9232-812B0D14526E}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|x64.ActiveCfg = Debug|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|x64.Build.0 = Debug|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|x86.ActiveCfg = Debug|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|x86.Build.0 = Debug|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Release|Any CPU.ActiveCfg = Release|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Release|Any CPU.Build.0 = Release|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Release|x64.ActiveCfg = Release|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Release|x64.Build.0 = Release|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Release|x86.ActiveCfg = Release|Any CPU - {124EF02B-26DF-4410-9232-812B0D14526E}.Release|x86.Build.0 = Release|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Debug|Any CPU.Build.0 = Debug|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Debug|x64.ActiveCfg = Debug|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Debug|x64.Build.0 = Debug|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Debug|x86.ActiveCfg = Debug|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Debug|x86.Build.0 = Debug|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Release|Any CPU.ActiveCfg = Release|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Release|Any CPU.Build.0 = Release|Any CPU - 
{415814BC-A9B9-4742-B716-6358E2913565}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Release|x64.ActiveCfg = Release|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Release|x64.Build.0 = Release|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Release|x86.ActiveCfg = Release|Any CPU - {415814BC-A9B9-4742-B716-6358E2913565}.Release|x86.Build.0 = Release|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|Any CPU.Build.0 = Debug|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|x64.ActiveCfg = Debug|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|x64.Build.0 = Debug|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|x86.ActiveCfg = Debug|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|x86.Build.0 = Debug|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|Any CPU.ActiveCfg = Release|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|Any CPU.Build.0 = Release|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|x64.ActiveCfg = Release|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|x64.Build.0 = Release|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|x86.ActiveCfg = Release|Any CPU - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|x86.Build.0 = Release|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|x64.ActiveCfg = Debug|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|x64.Build.0 = Debug|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|x86.ActiveCfg = Debug|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|x86.Build.0 = Debug|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|Any CPU.ActiveCfg = Release|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|Any CPU.Build.0 = Release|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|x64.ActiveCfg = Release|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|x64.Build.0 = Release|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|x86.ActiveCfg = Release|Any CPU - {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|x86.Build.0 = Release|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|Any CPU.Build.0 = Debug|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|x64.ActiveCfg = Debug|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|x64.Build.0 = Debug|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|x86.ActiveCfg = Debug|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|x86.Build.0 = Debug|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|Any CPU.ActiveCfg = Release|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|Any CPU.Build.0 = Release|Any CPU - 
{4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|x64.ActiveCfg = Release|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|x64.Build.0 = Release|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|x86.ActiveCfg = Release|Any CPU - {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|x86.Build.0 = Release|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|Any CPU.Build.0 = Debug|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|x64.ActiveCfg = Debug|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|x64.Build.0 = Debug|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|x86.ActiveCfg = Debug|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|x86.Build.0 = Debug|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|Any CPU.ActiveCfg = Release|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|Any CPU.Build.0 = Release|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|x64.ActiveCfg = Release|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|x64.Build.0 = Release|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|x86.ActiveCfg = Release|Any CPU - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|x86.Build.0 = Release|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|x64.ActiveCfg = Debug|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|x64.Build.0 = Debug|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|x86.ActiveCfg = Debug|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|x86.Build.0 = Debug|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|Any CPU.ActiveCfg = Release|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|Any CPU.Build.0 = Release|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|Mixed Platforms.Build.0 = Release|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|x64.ActiveCfg = Release|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|x64.Build.0 = Release|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|x86.ActiveCfg = Release|Any CPU - {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|x86.Build.0 = Release|Any CPU - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(NestedProjects) = preSolution - {786E6165-CA02-45A9-BF58-207A45D7D6DF} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {5DB4DDBA-81F6-4D81-943A-18F3178B3355} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {A453E920-29C0-45CD-984C-0D8E3631B1E3} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {0279705B-779D-485D-86B9-F7AB3DD1F2C3} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {6B711835-9172-4F07-9FC3-BA79C7DFA916} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {43E6E75C-2410-464D-B7D6-36F8ACF85A17} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {C250C071-6ACD-42E0-9FFC-63283AFB8C6C} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - 
{EF5A2EB2-723F-4CAF-9950-954421E3B0A0} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {B2F72AB7-36C5-4E95-839F-0DD340AC8C36} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {DF893D4B-BB76-4A5C-AB92-B1A8B283577F} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {2C97F180-2F80-49A5-9C8B-D114E4CCC819} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {E1FD0E29-39F8-40A1-89D7-F513DD783089} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {0A1EF09D-E36E-46A8-A734-844C09323873} = {875EF569-4684-473D-A2D4-A35B20B4A07C} - {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A} = {875EF569-4684-473D-A2D4-A35B20B4A07C} - {2B625502-0E67-46E0-904D-CD11B7B2DE93} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {E30A5CB4-33A1-4B54-9526-F671DAA44821} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {9031FD6A-889D-431E-AE12-1FCB8A016F57} = {61F11B98-137D-402E-AAF9-DA329D109B4B} - {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084} = {61F11B98-137D-402E-AAF9-DA329D109B4B} - {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702} = {61F11B98-137D-402E-AAF9-DA329D109B4B} - {5277839B-309F-4EC4-B164-A2D860BA413C} = {61F11B98-137D-402E-AAF9-DA329D109B4B} - {7FDD4077-1372-401C-A698-96395DF0C8DB} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {261098F3-F277-46E6-832C-5AAA872CB525} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {988FFB5D-4485-475B-87FE-5D651D641CD7} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {E53F97F9-A059-4252-8C09-8C6B63678F24} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {6F17E01E-8BEB-411B-84FD-316D7A29F128} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {ABDCC209-B20B-4027-A374-1476F5FF5F48} = {61F11B98-137D-402E-AAF9-DA329D109B4B} - {EC55EA80-E068-440B-A357-89D4CA1BECB8} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - 
{1136196B-B909-47FE-9D70-1788402DF4D0} = {61F11B98-137D-402E-AAF9-DA329D109B4B} - {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2} = {61F11B98-137D-402E-AAF9-DA329D109B4B} - {124EF02B-26DF-4410-9232-812B0D14526E} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {415814BC-A9B9-4742-B716-6358E2913565} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {170FCE1A-5077-4518-9444-8B4ADCE8A8FC} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} - {9E9B0B82-46B4-4A80-918F-32E855406DBC} = {C68D256D-7D40-4E33-8A2B-B1625538B138} - {4C6517FA-E734-4090-BCE3-BC50FBC632B8} = {61F11B98-137D-402E-AAF9-DA329D109B4B} - {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6} = {61F11B98-137D-402E-AAF9-DA329D109B4B} - {F91BACBA-937D-4282-CC0C-098383E793DC} = {61F11B98-137D-402E-AAF9-DA329D109B4B} - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {7D903DEB-CA0B-43D8-BD9D-820BB1453C4C} - EndGlobalSection - GlobalSection(TestCaseManagementSettings) = postSolution - CategoryFile = sensenet.vsmdi - EndGlobalSection -EndGlobal + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.32014.148 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Tools", "Tools", "{875EF569-4684-473D-A2D4-A35B20B4A07C}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Core", "Core", "{2997D17C-A736-43E5-B3DD-11D11AC7DF17}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.ContentRepository", "ContentRepository\SenseNet.ContentRepository.csproj", "{786E6165-CA02-45A9-BF58-207A45D7D6DF}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Storage", "Storage\SenseNet.Storage.csproj", "{5DB4DDBA-81F6-4D81-943A-18F3178B3355}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Common", "Common\SenseNet.Common.csproj", "{A453E920-29C0-45CD-984C-0D8E3631B1E3}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution 
Items", "Solution Items", "{6D908666-E47F-47A7-A7E5-C696B9CD517A}" + ProjectSection(SolutionItems) = preProject + nuget\snadmin\install-services-core\manifest.xml = nuget\snadmin\install-services-core\manifest.xml + nuget\readme.txt = nuget\readme.txt + EndProjectSection +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Tests", "Tests", "{C68D256D-7D40-4E33-8A2B-B1625538B138}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Search", "Search\SenseNet.Search.csproj", "{0279705B-779D-485D-86B9-F7AB3DD1F2C3}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.ContentRepository.Tests", "Tests\SenseNet.ContentRepository.Tests\SenseNet.ContentRepository.Tests.csproj", "{1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Packaging.Tests", "Tests\SenseNet.Packaging.Tests\SenseNet.Packaging.Tests.csproj", "{6B711835-9172-4F07-9FC3-BA79C7DFA916}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Search.Tests", "Tests\SenseNet.Search.Tests\SenseNet.Search.Tests.csproj", "{43E6E75C-2410-464D-B7D6-36F8ACF85A17}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.BlobStorage", "BlobStorage\SenseNet.BlobStorage.csproj", "{C250C071-6ACD-42E0-9FFC-63283AFB8C6C}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.OData", "OData\SenseNet.OData.csproj", "{EF5A2EB2-723F-4CAF-9950-954421E3B0A0}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.ODataTests", "Tests\SenseNet.ODataTests\SenseNet.ODataTests.csproj", "{B2F72AB7-36C5-4E95-839F-0DD340AC8C36}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.ContentRepository.InMemory", "ContentRepository.InMemory\SenseNet.ContentRepository.InMemory.csproj", "{DF893D4B-BB76-4A5C-AB92-B1A8B283577F}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Services.Core", 
"Services.Core\SenseNet.Services.Core.csproj", "{2C97F180-2F80-49A5-9C8B-D114E4CCC819}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Services.Core.Install", "Services.Core.Install\SenseNet.Services.Core.Install.csproj", "{E1FD0E29-39F8-40A1-89D7-F513DD783089}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnConsoleInstaller", "Tools\SnConsoleInstaller\SnConsoleInstaller.csproj", "{0A1EF09D-E36E-46A8-A734-844C09323873}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Services.Core.Tests", "Tests\SenseNet.Services.Core.Tests\SenseNet.Services.Core.Tests.csproj", "{D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnInitialDataGenerator", "Tools\SnInitialDataGenerator\SnInitialDataGenerator.csproj", "{7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnInitialDataGenerator.Tests", "Tests\SnInitialDataGenerator.Tests\SnInitialDataGenerator.Tests.csproj", "{2B625502-0E67-46E0-904D-CD11B7B2DE93}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Services.Wopi", "Services.Wopi\SenseNet.Services.Wopi.csproj", "{C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Services.Wopi.Tests", "Tests\SenseNet.Services.Wopi.Tests\SenseNet.Services.Wopi.Tests.csproj", "{E30A5CB4-33A1-4B54-9526-F671DAA44821}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "WebApps", "WebApps", "{61F11B98-137D-402E-AAF9-DA329D109B4B}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.InMem.Admin", "WebApps\SnWebApplication.Api.InMem.Admin\SnWebApplication.Api.InMem.Admin.csproj", "{9031FD6A-889D-431E-AE12-1FCB8A016F57}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.InMem.TokenAuth", 
"WebApps\SnWebApplication.Api.InMem.TokenAuth\SnWebApplication.Api.InMem.TokenAuth.csproj", "{BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.Sql.Admin", "WebApps\SnWebApplication.Api.Sql.Admin\SnWebApplication.Api.Sql.Admin.csproj", "{FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.Sql.TokenAuth", "WebApps\SnWebApplication.Api.Sql.TokenAuth\SnWebApplication.Api.Sql.TokenAuth.csproj", "{5277839B-309F-4EC4-B164-A2D860BA413C}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.MiddlewareTests", "Tests\SenseNet.MiddlewareTests\SenseNet.MiddlewareTests.csproj", "{7FDD4077-1372-401C-A698-96395DF0C8DB}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Tests.Core", "Tests\SenseNet.Tests.Core\SenseNet.Tests.Core.csproj", "{261098F3-F277-46E6-832C-5AAA872CB525}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Tests.Core.Tests", "Tests\SenseNet.Tests.Core.Tests\SenseNet.Tests.Core.Tests.csproj", "{988FFB5D-4485-475B-87FE-5D651D641CD7}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.IntegrationTests", "Tests\SenseNet.IntegrationTests\SenseNet.IntegrationTests.csproj", "{E53F97F9-A059-4252-8C09-8C6B63678F24}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.WebHooks.Common", "WebHooks.Common\SenseNet.WebHooks.Common.csproj", "{319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.WebHooks.Tests", "Tests\SenseNet.WebHooks.Tests\SenseNet.WebHooks.Tests.csproj", "{C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.WebHooks", "WebHooks\SenseNet.WebHooks.csproj", "{6F17E01E-8BEB-411B-84FD-316D7A29F128}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = 
"WebHookHandler", "WebApps\WebHookHandler\WebHookHandler.csproj", "{ABDCC209-B20B-4027-A374-1476F5FF5F48}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.Abstractions", "Abstractions\SenseNet.Abstractions.csproj", "{EC55EA80-E068-440B-A357-89D4CA1BECB8}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.Sql.SearchService.TokenAuth", "WebApps\SnWebApplication.Api.Sql.SearchService.TokenAuth\SnWebApplication.Api.Sql.SearchService.TokenAuth.csproj", "{1136196B-B909-47FE-9D70-1788402DF4D0}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.Sql.SearchService.Admin", "WebApps\SnWebApplication.Api.Sql.SearchService.Admin\SnWebApplication.Api.Sql.SearchService.Admin.csproj", "{ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "WebAppTests", "Tests\WebAppTests\WebAppTests.csproj", "{124EF02B-26DF-4410-9232-812B0D14526E}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.ContentRepository.MsSql", "ContentRepository.MsSql\SenseNet.ContentRepository.MsSql.csproj", "{415814BC-A9B9-4742-B716-6358E2913565}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.OpenApi", "OpenApi\SenseNet.OpenApi.csproj", "{170FCE1A-5077-4518-9444-8B4ADCE8A8FC}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SenseNet.IntegrationTests.MsSql", "Tests\SenseNet.IntegrationTests.MsSql\SenseNet.IntegrationTests.MsSql.csproj", "{9E9B0B82-46B4-4A80-918F-32E855406DBC}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.Sql.TokenAuth.Preview", "WebApps\SnWebApplication.Api.Sql.TokenAuth.Preview\SnWebApplication.Api.Sql.TokenAuth.Preview.csproj", "{4C6517FA-E734-4090-BCE3-BC50FBC632B8}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SnWebApplication.Api.Sql.SearchService.TokenAuth.Preview", 
"WebApps\SnWebApplication.Api.Sql.SearchService.TokenAuth.Preview\SnWebApplication.Api.Sql.SearchService.TokenAuth.Preview.csproj", "{8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SnWebApplication.Api.Sql.TokenAuth.NLB", "WebApps\SnWebApplication.Api.Sql.TokenAuth.NLB\SnWebApplication.Api.Sql.TokenAuth.NLB.csproj", "{F91BACBA-937D-4282-CC0C-098383E793DC}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "ContentRepository.PostgreSql", "ContentRepository.PostgreSql", "{83C76EEC-405A-CB63-4B2B-8F87D45E23B6}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SenseNet.ContentRepository.PostgreSql", "ContentRepository.PostgreSql\SenseNet.ContentRepository.PostgreSql.csproj", "{E13051DE-12E2-4065-9605-46FAE1475DB0}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "ContentRepository", "ContentRepository", "{59956430-9F3D-0442-91CD-7506CF199E10}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Storage", "Storage", "{2F2B95E5-6225-D1F4-CACB-8A4C26993FF3}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|Mixed Platforms = Debug|Mixed Platforms + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|Mixed Platforms = Release|Mixed Platforms + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|Any CPU.Build.0 = Debug|Any CPU + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|x64.ActiveCfg = Debug|Any CPU + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Debug|x86.ActiveCfg = 
Debug|Any CPU + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|Any CPU.ActiveCfg = Release|Any CPU + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|Any CPU.Build.0 = Release|Any CPU + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|x64.ActiveCfg = Release|Any CPU + {786E6165-CA02-45A9-BF58-207A45D7D6DF}.Release|x86.ActiveCfg = Release|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|Any CPU.Build.0 = Debug|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|x64.ActiveCfg = Debug|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Debug|x86.ActiveCfg = Debug|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|Any CPU.ActiveCfg = Release|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|Any CPU.Build.0 = Release|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|x64.ActiveCfg = Release|Any CPU + {5DB4DDBA-81F6-4D81-943A-18F3178B3355}.Release|x86.ActiveCfg = Release|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|x64.Build.0 = Debug|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|x86.ActiveCfg = Debug|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Debug|x86.Build.0 = Debug|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|Any CPU.Build.0 = Release|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|x64.ActiveCfg = Release|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|x64.Build.0 = Release|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|x86.ActiveCfg = Release|Any CPU + {A453E920-29C0-45CD-984C-0D8E3631B1E3}.Release|x86.Build.0 = Release|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|Any CPU.Build.0 = Debug|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|x64.ActiveCfg = Debug|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|x64.Build.0 = Debug|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|x86.ActiveCfg = Debug|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Debug|x86.Build.0 = Debug|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|Any CPU.ActiveCfg = Release|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|Any CPU.Build.0 = Release|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|x64.ActiveCfg = Release|Any CPU + 
{0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|x64.Build.0 = Release|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|x86.ActiveCfg = Release|Any CPU + {0279705B-779D-485D-86B9-F7AB3DD1F2C3}.Release|x86.Build.0 = Release|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|Any CPU.Build.0 = Debug|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|x64.ActiveCfg = Debug|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|x64.Build.0 = Debug|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|x86.ActiveCfg = Debug|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Debug|x86.Build.0 = Debug|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|Any CPU.ActiveCfg = Release|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|Any CPU.Build.0 = Release|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|x64.ActiveCfg = Release|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|x64.Build.0 = Release|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|x86.ActiveCfg = Release|Any CPU + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7}.Release|x86.Build.0 = Release|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|Any CPU.Build.0 = Debug|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|x64.Build.0 = Debug|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|x86.ActiveCfg = Debug|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Debug|x86.Build.0 = Debug|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|Any CPU.ActiveCfg = Release|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|Any CPU.Build.0 = Release|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|x64.ActiveCfg = Release|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|x64.Build.0 = Release|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|x86.ActiveCfg = Release|Any CPU + {6B711835-9172-4F07-9FC3-BA79C7DFA916}.Release|x86.Build.0 = Release|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|Any CPU.Build.0 = Debug|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|x64.ActiveCfg = Debug|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|x64.Build.0 = Debug|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|x86.ActiveCfg = Debug|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Debug|x86.Build.0 = Debug|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|Any CPU.ActiveCfg = Release|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|Any CPU.Build.0 = Release|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|x64.ActiveCfg = Release|Any CPU + 
{43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|x64.Build.0 = Release|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|x86.ActiveCfg = Release|Any CPU + {43E6E75C-2410-464D-B7D6-36F8ACF85A17}.Release|x86.Build.0 = Release|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|x64.ActiveCfg = Debug|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|x64.Build.0 = Debug|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|x86.ActiveCfg = Debug|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Debug|x86.Build.0 = Debug|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|Any CPU.Build.0 = Release|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|x64.ActiveCfg = Release|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|x64.Build.0 = Release|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|x86.ActiveCfg = Release|Any CPU + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C}.Release|x86.Build.0 = Release|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|Any CPU.Build.0 = Debug|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|x64.Build.0 = Debug|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|x86.ActiveCfg = Debug|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Debug|x86.Build.0 = Debug|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|Any CPU.ActiveCfg = Release|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|Any CPU.Build.0 = Release|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|x64.ActiveCfg = Release|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|x64.Build.0 = Release|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|x86.ActiveCfg = Release|Any CPU + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0}.Release|x86.Build.0 = Release|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|Any CPU.Build.0 = Debug|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|x64.ActiveCfg = Debug|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|x64.Build.0 = Debug|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|x86.ActiveCfg = Debug|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Debug|x86.Build.0 = Debug|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|Any CPU.Build.0 = Release|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|x64.ActiveCfg = Release|Any CPU + 
{B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|x64.Build.0 = Release|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|x86.ActiveCfg = Release|Any CPU + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36}.Release|x86.Build.0 = Release|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|x64.ActiveCfg = Debug|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|x64.Build.0 = Debug|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|x86.ActiveCfg = Debug|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Debug|x86.Build.0 = Debug|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|Any CPU.Build.0 = Release|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|x64.ActiveCfg = Release|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|x64.Build.0 = Release|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|x86.ActiveCfg = Release|Any CPU + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F}.Release|x86.Build.0 = Release|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|Any CPU.Build.0 = Debug|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|x64.Build.0 = Debug|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|x86.ActiveCfg = Debug|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Debug|x86.Build.0 = Debug|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|Any CPU.ActiveCfg = Release|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|Any CPU.Build.0 = Release|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|x64.ActiveCfg = Release|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|x64.Build.0 = Release|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|x86.ActiveCfg = Release|Any CPU + {2C97F180-2F80-49A5-9C8B-D114E4CCC819}.Release|x86.Build.0 = Release|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|Any CPU.Build.0 = Debug|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|x64.ActiveCfg = Debug|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|x64.Build.0 = Debug|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|x86.ActiveCfg = Debug|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Debug|x86.Build.0 = Debug|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|Any CPU.ActiveCfg = Release|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|Any CPU.Build.0 = Release|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|x64.ActiveCfg = Release|Any CPU + 
{E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|x64.Build.0 = Release|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|x86.ActiveCfg = Release|Any CPU + {E1FD0E29-39F8-40A1-89D7-F513DD783089}.Release|x86.Build.0 = Release|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|Any CPU.Build.0 = Debug|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|x64.ActiveCfg = Debug|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|x64.Build.0 = Debug|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|x86.ActiveCfg = Debug|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Debug|x86.Build.0 = Debug|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|Any CPU.ActiveCfg = Release|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|Any CPU.Build.0 = Release|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|x64.ActiveCfg = Release|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|x64.Build.0 = Release|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|x86.ActiveCfg = Release|Any CPU + {0A1EF09D-E36E-46A8-A734-844C09323873}.Release|x86.Build.0 = Release|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|Any CPU.Build.0 = Debug|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|x64.Build.0 = Debug|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|x86.ActiveCfg = Debug|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Debug|x86.Build.0 = Debug|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|Any CPU.ActiveCfg = Release|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|Any CPU.Build.0 = Release|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|x64.ActiveCfg = Release|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|x64.Build.0 = Release|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|x86.ActiveCfg = Release|Any CPU + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5}.Release|x86.Build.0 = Release|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|Any CPU.Build.0 = Debug|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|x64.ActiveCfg = Debug|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|x64.Build.0 = Debug|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|x86.ActiveCfg = Debug|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Debug|x86.Build.0 = Debug|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|Any CPU.ActiveCfg = Release|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|Any CPU.Build.0 = Release|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|x64.ActiveCfg = Release|Any CPU + 
{7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|x64.Build.0 = Release|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|x86.ActiveCfg = Release|Any CPU + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A}.Release|x86.Build.0 = Release|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|Any CPU.Build.0 = Debug|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|x64.ActiveCfg = Debug|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|x64.Build.0 = Debug|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|x86.ActiveCfg = Debug|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Debug|x86.Build.0 = Debug|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|Any CPU.ActiveCfg = Release|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|Any CPU.Build.0 = Release|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|x64.ActiveCfg = Release|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|x64.Build.0 = Release|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|x86.ActiveCfg = Release|Any CPU + {2B625502-0E67-46E0-904D-CD11B7B2DE93}.Release|x86.Build.0 = Release|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|x64.Build.0 = Debug|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|x86.ActiveCfg = Debug|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Debug|x86.Build.0 = Debug|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|Any CPU.Build.0 = Release|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|x64.ActiveCfg = Release|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|x64.Build.0 = Release|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|x86.ActiveCfg = Release|Any CPU + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22}.Release|x86.Build.0 = Release|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|Any CPU.Build.0 = Debug|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|x64.ActiveCfg = Debug|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|x64.Build.0 = Debug|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|x86.ActiveCfg = Debug|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Debug|x86.Build.0 = Debug|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|Any CPU.ActiveCfg = Release|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|Any CPU.Build.0 = Release|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|x64.ActiveCfg = Release|Any CPU + 
{E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|x64.Build.0 = Release|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|x86.ActiveCfg = Release|Any CPU + {E30A5CB4-33A1-4B54-9526-F671DAA44821}.Release|x86.Build.0 = Release|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|Any CPU.Build.0 = Debug|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|x64.ActiveCfg = Debug|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|x64.Build.0 = Debug|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|x86.ActiveCfg = Debug|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Debug|x86.Build.0 = Debug|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|Any CPU.ActiveCfg = Release|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|Any CPU.Build.0 = Release|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|x64.ActiveCfg = Release|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|x64.Build.0 = Release|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|x86.ActiveCfg = Release|Any CPU + {9031FD6A-889D-431E-AE12-1FCB8A016F57}.Release|x86.Build.0 = Release|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|Any CPU.Build.0 = Debug|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|x64.Build.0 = Debug|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|x86.ActiveCfg = Debug|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Debug|x86.Build.0 = Debug|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|Any CPU.ActiveCfg = Release|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|Any CPU.Build.0 = Release|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|x64.ActiveCfg = Release|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|x64.Build.0 = Release|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|x86.ActiveCfg = Release|Any CPU + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084}.Release|x86.Build.0 = Release|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|Any CPU.Build.0 = Debug|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|x64.ActiveCfg = Debug|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|x64.Build.0 = Debug|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|x86.ActiveCfg = Debug|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Debug|x86.Build.0 = Debug|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|Any CPU.ActiveCfg = Release|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|Any CPU.Build.0 = Release|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|x64.ActiveCfg = Release|Any CPU + 
{FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|x64.Build.0 = Release|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|x86.ActiveCfg = Release|Any CPU + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702}.Release|x86.Build.0 = Release|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|Any CPU.Build.0 = Debug|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|x64.ActiveCfg = Debug|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|x64.Build.0 = Debug|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|x86.ActiveCfg = Debug|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Debug|x86.Build.0 = Debug|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|Any CPU.ActiveCfg = Release|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|Any CPU.Build.0 = Release|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|x64.ActiveCfg = Release|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|x64.Build.0 = Release|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|x86.ActiveCfg = Release|Any CPU + {5277839B-309F-4EC4-B164-A2D860BA413C}.Release|x86.Build.0 = Release|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|Any CPU.Build.0 = Debug|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|x64.Build.0 = Debug|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|x86.ActiveCfg = Debug|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Debug|x86.Build.0 = Debug|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|Any CPU.ActiveCfg = Release|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|Any CPU.Build.0 = Release|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|x64.ActiveCfg = Release|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|x64.Build.0 = Release|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|x86.ActiveCfg = Release|Any CPU + {7FDD4077-1372-401C-A698-96395DF0C8DB}.Release|x86.Build.0 = Release|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|Any CPU.Build.0 = Debug|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|x64.ActiveCfg = Debug|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|x64.Build.0 = Debug|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|x86.ActiveCfg = Debug|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Debug|x86.Build.0 = Debug|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Release|Any CPU.ActiveCfg = Release|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Release|Any CPU.Build.0 = Release|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Release|x64.ActiveCfg = Release|Any CPU + 
{261098F3-F277-46E6-832C-5AAA872CB525}.Release|x64.Build.0 = Release|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Release|x86.ActiveCfg = Release|Any CPU + {261098F3-F277-46E6-832C-5AAA872CB525}.Release|x86.Build.0 = Release|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|Any CPU.Build.0 = Debug|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|x64.ActiveCfg = Debug|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|x64.Build.0 = Debug|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|x86.ActiveCfg = Debug|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Debug|x86.Build.0 = Debug|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|Any CPU.ActiveCfg = Release|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|Any CPU.Build.0 = Release|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|x64.ActiveCfg = Release|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|x64.Build.0 = Release|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|x86.ActiveCfg = Release|Any CPU + {988FFB5D-4485-475B-87FE-5D651D641CD7}.Release|x86.Build.0 = Release|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|Any CPU.Build.0 = Debug|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|x64.Build.0 = Debug|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|x86.ActiveCfg = Debug|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Debug|x86.Build.0 = Debug|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|Any CPU.ActiveCfg = Release|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|Any CPU.Build.0 = Release|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|x64.ActiveCfg = Release|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|x64.Build.0 = Release|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|x86.ActiveCfg = Release|Any CPU + {E53F97F9-A059-4252-8C09-8C6B63678F24}.Release|x86.Build.0 = Release|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|Any CPU.Build.0 = Debug|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|x64.ActiveCfg = Debug|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|x64.Build.0 = Debug|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|x86.ActiveCfg = Debug|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Debug|x86.Build.0 = Debug|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|Any CPU.ActiveCfg = Release|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|Any CPU.Build.0 = Release|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|x64.ActiveCfg = Release|Any CPU + 
{319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|x64.Build.0 = Release|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|x86.ActiveCfg = Release|Any CPU + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C}.Release|x86.Build.0 = Release|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|x64.ActiveCfg = Debug|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|x64.Build.0 = Debug|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|x86.ActiveCfg = Debug|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Debug|x86.Build.0 = Debug|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|Any CPU.Build.0 = Release|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|x64.ActiveCfg = Release|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|x64.Build.0 = Release|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|x86.ActiveCfg = Release|Any CPU + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2}.Release|x86.Build.0 = Release|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|Any CPU.Build.0 = Debug|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|x64.Build.0 = Debug|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|x86.ActiveCfg = Debug|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Debug|x86.Build.0 = Debug|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|Any CPU.ActiveCfg = Release|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|Any CPU.Build.0 = Release|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|x64.ActiveCfg = Release|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|x64.Build.0 = Release|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|x86.ActiveCfg = Release|Any CPU + {6F17E01E-8BEB-411B-84FD-316D7A29F128}.Release|x86.Build.0 = Release|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|Any CPU.Build.0 = Debug|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|x64.ActiveCfg = Debug|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|x64.Build.0 = Debug|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|x86.ActiveCfg = Debug|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Debug|x86.Build.0 = Debug|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|Any CPU.ActiveCfg = Release|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|Any CPU.Build.0 = Release|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|x64.ActiveCfg = Release|Any CPU + 
{ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|x64.Build.0 = Release|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|x86.ActiveCfg = Release|Any CPU + {ABDCC209-B20B-4027-A374-1476F5FF5F48}.Release|x86.Build.0 = Release|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|Any CPU.Build.0 = Debug|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|x64.ActiveCfg = Debug|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|x64.Build.0 = Debug|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|x86.ActiveCfg = Debug|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Debug|x86.Build.0 = Debug|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|Any CPU.ActiveCfg = Release|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|Any CPU.Build.0 = Release|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|x64.ActiveCfg = Release|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|x64.Build.0 = Release|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|x86.ActiveCfg = Release|Any CPU + {EC55EA80-E068-440B-A357-89D4CA1BECB8}.Release|x86.Build.0 = Release|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|Any CPU.Build.0 = Debug|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|x64.Build.0 = Debug|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|x86.ActiveCfg = Debug|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Debug|x86.Build.0 = Debug|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|Any CPU.ActiveCfg = Release|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|Any CPU.Build.0 = Release|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|x64.ActiveCfg = Release|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|x64.Build.0 = Release|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|x86.ActiveCfg = Release|Any CPU + {1136196B-B909-47FE-9D70-1788402DF4D0}.Release|x86.Build.0 = Release|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|Any CPU.Build.0 = Debug|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|x64.ActiveCfg = Debug|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|x64.Build.0 = Debug|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|x86.ActiveCfg = Debug|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Debug|x86.Build.0 = Debug|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|Any CPU.ActiveCfg = Release|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|Any CPU.Build.0 = Release|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|x64.ActiveCfg = Release|Any CPU + 
{ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|x64.Build.0 = Release|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|x86.ActiveCfg = Release|Any CPU + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2}.Release|x86.Build.0 = Release|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|x64.ActiveCfg = Debug|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|x64.Build.0 = Debug|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|x86.ActiveCfg = Debug|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Debug|x86.Build.0 = Debug|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Release|Any CPU.Build.0 = Release|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Release|x64.ActiveCfg = Release|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Release|x64.Build.0 = Release|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Release|x86.ActiveCfg = Release|Any CPU + {124EF02B-26DF-4410-9232-812B0D14526E}.Release|x86.Build.0 = Release|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Debug|Any CPU.Build.0 = Debug|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{415814BC-A9B9-4742-B716-6358E2913565}.Debug|x64.Build.0 = Debug|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Debug|x86.ActiveCfg = Debug|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Debug|x86.Build.0 = Debug|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Release|Any CPU.ActiveCfg = Release|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Release|Any CPU.Build.0 = Release|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Release|x64.ActiveCfg = Release|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Release|x64.Build.0 = Release|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Release|x86.ActiveCfg = Release|Any CPU + {415814BC-A9B9-4742-B716-6358E2913565}.Release|x86.Build.0 = Release|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|Any CPU.Build.0 = Debug|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|x64.ActiveCfg = Debug|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|x64.Build.0 = Debug|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|x86.ActiveCfg = Debug|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Debug|x86.Build.0 = Debug|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|Any CPU.ActiveCfg = Release|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|Any CPU.Build.0 = Release|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|x64.ActiveCfg = Release|Any CPU + 
{170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|x64.Build.0 = Release|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|x86.ActiveCfg = Release|Any CPU + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC}.Release|x86.Build.0 = Release|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|Any CPU.Build.0 = Debug|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|x64.ActiveCfg = Debug|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|x64.Build.0 = Debug|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|x86.ActiveCfg = Debug|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Debug|x86.Build.0 = Debug|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|Any CPU.ActiveCfg = Release|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|Any CPU.Build.0 = Release|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|x64.ActiveCfg = Release|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|x64.Build.0 = Release|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|x86.ActiveCfg = Release|Any CPU + {9E9B0B82-46B4-4A80-918F-32E855406DBC}.Release|x86.Build.0 = Release|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|Any CPU.Build.0 = Debug|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|x64.Build.0 = Debug|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|x86.ActiveCfg = Debug|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Debug|x86.Build.0 = Debug|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|Any CPU.ActiveCfg = Release|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|Any CPU.Build.0 = Release|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|x64.ActiveCfg = Release|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|x64.Build.0 = Release|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|x86.ActiveCfg = Release|Any CPU + {4C6517FA-E734-4090-BCE3-BC50FBC632B8}.Release|x86.Build.0 = Release|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|Any CPU.Build.0 = Debug|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|x64.ActiveCfg = Debug|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|x64.Build.0 = Debug|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|x86.ActiveCfg = Debug|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Debug|x86.Build.0 = Debug|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|Any CPU.ActiveCfg = Release|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|Any CPU.Build.0 = Release|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|x64.ActiveCfg = Release|Any CPU + 
{8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|x64.Build.0 = Release|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|x86.ActiveCfg = Release|Any CPU + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6}.Release|x86.Build.0 = Release|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|Any CPU.Build.0 = Debug|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|x64.ActiveCfg = Debug|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|x64.Build.0 = Debug|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|x86.ActiveCfg = Debug|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Debug|x86.Build.0 = Debug|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|Any CPU.ActiveCfg = Release|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|Any CPU.Build.0 = Release|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|x64.ActiveCfg = Release|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|x64.Build.0 = Release|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|x86.ActiveCfg = Release|Any CPU + {F91BACBA-937D-4282-CC0C-098383E793DC}.Release|x86.Build.0 = Release|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Debug|Any CPU.Build.0 = Debug|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Debug|Mixed Platforms.ActiveCfg = Debug|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Debug|Mixed Platforms.Build.0 = Debug|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Debug|x64.ActiveCfg = Debug|Any CPU + 
{E13051DE-12E2-4065-9605-46FAE1475DB0}.Debug|x64.Build.0 = Debug|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Debug|x86.ActiveCfg = Debug|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Debug|x86.Build.0 = Debug|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Release|Any CPU.ActiveCfg = Release|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Release|Any CPU.Build.0 = Release|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Release|Mixed Platforms.ActiveCfg = Release|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Release|Mixed Platforms.Build.0 = Release|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Release|x64.ActiveCfg = Release|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Release|x64.Build.0 = Release|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Release|x86.ActiveCfg = Release|Any CPU + {E13051DE-12E2-4065-9605-46FAE1475DB0}.Release|x86.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(NestedProjects) = preSolution + {786E6165-CA02-45A9-BF58-207A45D7D6DF} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {5DB4DDBA-81F6-4D81-943A-18F3178B3355} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {A453E920-29C0-45CD-984C-0D8E3631B1E3} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {0279705B-779D-485D-86B9-F7AB3DD1F2C3} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {1CF177B1-BCF9-4634-AD71-9DC6D0E58AB7} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {6B711835-9172-4F07-9FC3-BA79C7DFA916} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {43E6E75C-2410-464D-B7D6-36F8ACF85A17} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {C250C071-6ACD-42E0-9FFC-63283AFB8C6C} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {EF5A2EB2-723F-4CAF-9950-954421E3B0A0} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {B2F72AB7-36C5-4E95-839F-0DD340AC8C36} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {DF893D4B-BB76-4A5C-AB92-B1A8B283577F} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + 
{2C97F180-2F80-49A5-9C8B-D114E4CCC819} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {E1FD0E29-39F8-40A1-89D7-F513DD783089} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {0A1EF09D-E36E-46A8-A734-844C09323873} = {875EF569-4684-473D-A2D4-A35B20B4A07C} + {D52DFAF4-A0DD-4926-BF8E-BF67B0A9A5C5} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {7FA30B8F-0BB0-4A08-B0E1-6AA8D5CCC54A} = {875EF569-4684-473D-A2D4-A35B20B4A07C} + {2B625502-0E67-46E0-904D-CD11B7B2DE93} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {C5E63F70-AFD6-401B-BA3E-1707CDA1AC22} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {E30A5CB4-33A1-4B54-9526-F671DAA44821} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {9031FD6A-889D-431E-AE12-1FCB8A016F57} = {61F11B98-137D-402E-AAF9-DA329D109B4B} + {BDC6A01A-8AE2-40BC-BAD1-E0CF06E7F084} = {61F11B98-137D-402E-AAF9-DA329D109B4B} + {FFFAE7E4-FF2A-4061-86C6-C4AA8B62B702} = {61F11B98-137D-402E-AAF9-DA329D109B4B} + {5277839B-309F-4EC4-B164-A2D860BA413C} = {61F11B98-137D-402E-AAF9-DA329D109B4B} + {7FDD4077-1372-401C-A698-96395DF0C8DB} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {261098F3-F277-46E6-832C-5AAA872CB525} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {988FFB5D-4485-475B-87FE-5D651D641CD7} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {E53F97F9-A059-4252-8C09-8C6B63678F24} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {319E4006-B1FD-4EF8-AF7A-F750AAEEF39C} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {C1E6CCA7-2AEE-4CF7-9FFB-3F7E5EB3CDC2} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {6F17E01E-8BEB-411B-84FD-316D7A29F128} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {ABDCC209-B20B-4027-A374-1476F5FF5F48} = {61F11B98-137D-402E-AAF9-DA329D109B4B} + {EC55EA80-E068-440B-A357-89D4CA1BECB8} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {1136196B-B909-47FE-9D70-1788402DF4D0} = {61F11B98-137D-402E-AAF9-DA329D109B4B} + {ED5CDB50-26D9-40E0-B1D9-80D1977FA2E2} = {61F11B98-137D-402E-AAF9-DA329D109B4B} + {124EF02B-26DF-4410-9232-812B0D14526E} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + 
{415814BC-A9B9-4742-B716-6358E2913565} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {170FCE1A-5077-4518-9444-8B4ADCE8A8FC} = {2997D17C-A736-43E5-B3DD-11D11AC7DF17} + {9E9B0B82-46B4-4A80-918F-32E855406DBC} = {C68D256D-7D40-4E33-8A2B-B1625538B138} + {4C6517FA-E734-4090-BCE3-BC50FBC632B8} = {61F11B98-137D-402E-AAF9-DA329D109B4B} + {8BA4865A-4C72-43C1-A3B2-FC21119C2CD6} = {61F11B98-137D-402E-AAF9-DA329D109B4B} + {F91BACBA-937D-4282-CC0C-098383E793DC} = {61F11B98-137D-402E-AAF9-DA329D109B4B} + {E13051DE-12E2-4065-9605-46FAE1475DB0} = {83C76EEC-405A-CB63-4B2B-8F87D45E23B6} + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {7D903DEB-CA0B-43D8-BD9D-820BB1453C4C} + EndGlobalSection + GlobalSection(TestCaseManagementSettings) = postSolution + CategoryFile = sensenet.vsmdi + EndGlobalSection +EndGlobal diff --git a/src/Services.Core.Install/SenseNet.Services.Core.Install.csproj b/src/Services.Core.Install/SenseNet.Services.Core.Install.csproj index 8dcab7c1a..6a0e3d4c1 100644 --- a/src/Services.Core.Install/SenseNet.Services.Core.Install.csproj +++ b/src/Services.Core.Install/SenseNet.Services.Core.Install.csproj @@ -29,8 +29,15 @@ - + + + + + + + diff --git a/src/Services.Core/Diagnostics/HealthHandler.cs b/src/Services.Core/Diagnostics/HealthHandler.cs index 1a37d600e..a89f2c1c1 100644 --- a/src/Services.Core/Diagnostics/HealthHandler.cs +++ b/src/Services.Core/Diagnostics/HealthHandler.cs @@ -319,6 +319,76 @@ private async Task GetIdentityHealthAsync(IServiceProvider service return health; } private async Task GetIdentityHealthAsync(AuthenticationOptions options, CancellationToken cancel) + { + if (options.AuthServerType == AuthenticationServerType.SNAuth) + return await GetSNAuthHealthAsync(options, cancel); + + return await GetIdentityServerHealthAsync(options, cancel); + } + + private async Task GetSNAuthHealthAsync(AuthenticationOptions options, CancellationToken cancel) + { + var timeout = TimeSpan.FromSeconds(4); + var 
combinedCancel = CancellationTokenSource.CreateLinkedTokenSource( + new CancellationTokenSource(timeout).Token, cancel).Token; + + HttpResponseMessage response = null; + string error = null; + TimeSpan? elapsed = null; + var timer = Stopwatch.StartNew(); + + var baseUrl = (!string.IsNullOrEmpty(options.MetadataHost) + ? options.MetadataHost + : options.Authority).TrimEnd('/'); + + try + { + var url = baseUrl + "/"; + var client = _httpClientFactory.CreateClient(); + response = await client.GetAsync(url, combinedCancel).ConfigureAwait(false); + elapsed = timer.Elapsed; + } + catch (TaskCanceledException ee) + { + elapsed = timer.Elapsed; + error = elapsed > timeout ? $"Response timeout reached ({timeout})." : ee.Message; + } + catch (Exception e) + { + error = e.Message; + } + timer.Stop(); + + if (response == null || error != null) + { + return new HealthResult + { + Color = HealthColor.Red, + Reason = $"{(response == null ? "No response. " : string.Empty)}Error: '{error}'", + Method = "Trying to reach SNAuth server." + }; + } + + if (response.StatusCode == HttpStatusCode.OK) + { + return new HealthResult + { + Color = HealthColor.Green, + ResponseTime = elapsed, + Method = "Checking SNAuth server availability." + }; + } + + return new HealthResult + { + Color = HealthColor.Yellow, + ResponseTime = elapsed, + Reason = $"Response status is {(int)response.StatusCode} {response.StatusCode}, expected: {(int)HttpStatusCode.OK} {HttpStatusCode.OK}", + Method = "Checking SNAuth server availability." + }; + } + + private async Task GetIdentityServerHealthAsync(AuthenticationOptions options, CancellationToken cancel) { var timeout = TimeSpan.FromSeconds(4); var combinedCancel = CancellationTokenSource.CreateLinkedTokenSource( @@ -331,7 +401,10 @@ private async Task GetIdentityHealthAsync(AuthenticationOptions op try { - var url = options.Authority.TrimEnd('/') + "/.well-known/openid-configuration"; + var baseUrl = (!string.IsNullOrEmpty(options.MetadataHost) + ? 
options.MetadataHost + : options.Authority).TrimEnd('/'); + var url = baseUrl + "/.well-known/openid-configuration"; var client = _httpClientFactory.CreateClient(); response = await client.GetAsync(url, combinedCancel).ConfigureAwait(false); elapsed = timer.Elapsed; diff --git a/src/Storage/Data/RelationalDataProviderBase.cs b/src/Storage/Data/RelationalDataProviderBase.cs index 62cd6a017..2b2d663fc 100644 --- a/src/Storage/Data/RelationalDataProviderBase.cs +++ b/src/Storage/Data/RelationalDataProviderBase.cs @@ -2,7 +2,6 @@ using System.Collections.Generic; using System.Data; using System.Data.Common; -using Microsoft.Data.SqlClient; using System.Globalization; using System.Linq; using System.Text; @@ -1418,11 +1417,11 @@ public override async Task InstanceCountAsync(int[] nodeTypeIds, Cancellati string.Join(", ", Enumerable.Range(0, nodeTypeIds.Length).Select(i => "@Id" + i))); using var ctx = CreateDataContext(cancellationToken); - var result = (int)await ctx.ExecuteScalarAsync(sql, cmd => + var result = Convert.ToInt32(await ctx.ExecuteScalarAsync(sql, cmd => { var index = 0; cmd.Parameters.AddRange(nodeTypeIds.Select(i => ctx.CreateParameter("@Id" + index++, DbType.Int32, i)).ToArray()); - }).ConfigureAwait(false); + }).ConfigureAwait(false)); op.Successful = true; return result; @@ -1684,7 +1683,7 @@ protected string[] GetParentChain(string path) paths[0] = "/" + paths[0]; for (int i = 1; i < paths.Length; i++) paths[i] = paths[i - 1] + "/" + paths[i]; - return paths.Reverse().ToArray(); + return ((IEnumerable)paths).Reverse().ToArray(); } protected DateTime GetObsoleteLimitTime() { @@ -2366,10 +2365,10 @@ public override async Task WriteAuditEventAsync(AuditEventInfo auditEvent, Cance ctx.CreateParameter("@LogDate", DbType.DateTime, auditEvent.Timestamp), ctx.CreateParameter("@MachineName", DbType.AnsiString, 32, (object)auditEvent.MachineName ?? DBNull.Value), ctx.CreateParameter("@AppDomainName", DbType.AnsiString, 512, (object)auditEvent.AppDomainName ?? 
DBNull.Value), - ctx.CreateParameter("@ProcessID", DbType.AnsiString, 256, auditEvent.ProcessId), + ctx.CreateParameter("@ProcessID", DbType.AnsiString, 256, auditEvent.ProcessId.ToString()), ctx.CreateParameter("@ProcessName", DbType.AnsiString, 512, (object)auditEvent.ProcessName ?? DBNull.Value), ctx.CreateParameter("@ThreadName", DbType.AnsiString, 512, (object)auditEvent.ThreadName ?? DBNull.Value), - ctx.CreateParameter("@Win32ThreadId", DbType.AnsiString, 128, auditEvent.ThreadId), + ctx.CreateParameter("@Win32ThreadId", DbType.AnsiString, 128, auditEvent.ThreadId.ToString()), ctx.CreateParameter("@Message", DbType.String, 1500, (object)auditEvent.Message ?? DBNull.Value), ctx.CreateParameter("@Formattedmessage", DbType.String, int.MaxValue, (object)auditEvent.FormattedMessage ?? DBNull.Value), }); @@ -2478,13 +2477,13 @@ public override async Task GetNodeCountAsync(string path, CancellationToken "GetNodeCount(path: {0})", path); using var ctx = CreateDataContext(cancellationToken); - var result = (int)await ctx.ExecuteScalarAsync( + var result = Convert.ToInt32(await ctx.ExecuteScalarAsync( path == null ? GetNodeCountScript : GetNodeCountInSubtreeScript, cmd => { if (path != null) cmd.Parameters.Add(ctx.CreateParameter("@Path", DbType.String, path)); - }).ConfigureAwait(false); + }).ConfigureAwait(false)); op.Successful = true; return result; @@ -2498,13 +2497,13 @@ public override async Task GetVersionCountAsync(string path, CancellationTo "GetVersionCount(path: {0})", path); using var ctx = CreateDataContext(cancellationToken); - var result = (int)await ctx.ExecuteScalarAsync( + var result = Convert.ToInt32(await ctx.ExecuteScalarAsync( path == null ? 
GetVersionCountScript : GetVersionCountInSubtreeScript, cmd => { if (path != null) cmd.Parameters.Add(ctx.CreateParameter("@Path", DbType.String, path)); - }).ConfigureAwait(false); + }).ConfigureAwait(false)); op.Successful = true; return result; @@ -2562,11 +2561,17 @@ THEN CAST(1 AS BIT) var dbResult = await ctx.ExecuteScalarAsync(schemaCheckSql).ConfigureAwait(false); result = Convert.ToBoolean(dbResult); } - catch (SqlException ex) + catch (DbException ex) { // Cannot open database requested by the login. The login failed. // This is possibly a sign that the db does not exist yet. - if (ex.Number is 4060 or 233) + // MSSQL error numbers: 4060 (cannot open database), 233 (connection error) + // PostgreSQL SqlState: 3D000 (invalid_catalog_name), 08006 (connection_failure), 08001 (sqlclient_unable_to_establish) + var numberProp = ex.GetType().GetProperty("Number"); + if (numberProp?.GetValue(ex) is int number && number is 4060 or 233) + result = false; + else if (ex.GetType().GetProperty("SqlState")?.GetValue(ex) is string sqlState + && sqlState is "3D000" or "08006" or "08001") result = false; else throw; @@ -2750,9 +2755,22 @@ protected Task RetryAsync(Func> action, CancellationToken cancel) protected virtual bool ShouldRetryOnError(Exception ex) { - //TODO: generalize the expression by relying on error codes instead of hardcoded message texts - return (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) || - (ex is SqlException && ex.Message.Contains("A network-related or instance-specific error occurred")); + if (ex is InvalidOperationException && ex.Message.Contains("connection from the pool")) + return true; + + if (ex is DbException dbEx) + { + // Check for the IsTransient property (available on NpgsqlException and newer SqlException) + var isTransientProp = dbEx.GetType().GetProperty("IsTransient"); + if (isTransientProp?.GetValue(dbEx) is bool isTransient && isTransient) + return true; + + // Fallback for SQL Server 
specific network error message + if (dbEx.Message.Contains("A network-related or instance-specific error occurred")) + return true; + } + + return false; } } } diff --git a/src/Storage/Security/AccessTokenVault.cs b/src/Storage/Security/AccessTokenVault.cs index b7b9e42d8..7b02c42cf 100644 --- a/src/Storage/Security/AccessTokenVault.cs +++ b/src/Storage/Security/AccessTokenVault.cs @@ -91,11 +91,22 @@ public static Task CreateTokenAsync(int userId, TimeSpan timeout, i /// A Task that represents the asynchronous operation and wraps the new AccessToken instance. public static async Task CreateTokenAsync(int userId, TimeSpan timeout, int contentId, string feature, CancellationToken cancellationToken) + { + return await CreateTokenAsync(userId, timeout, contentId, feature, null, cancellationToken) + .ConfigureAwait(false); + } + + /// + /// Creates a new token for the provided user with the specified timeout and a pre-defined value. + /// If is null, a cryptographically random value is generated. + /// + public static async Task CreateTokenAsync(int userId, TimeSpan timeout, int contentId, string feature, + string tokenValue, CancellationToken cancellationToken) { var now = DateTime.UtcNow; var token = new AccessToken { - Value = GenerateTokenValue(), + Value = tokenValue ?? GenerateTokenValue(), UserId = userId, ContentId = contentId, Feature = feature, diff --git a/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Dockerfile b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Dockerfile new file mode 100644 index 000000000..bbcac4781 --- /dev/null +++ b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Dockerfile @@ -0,0 +1,39 @@ +#See https://aka.ms/containerfastmode to understand how Visual Studio uses this Dockerfile to build your images for faster debugging. + +FROM mcr.microsoft.com/dotnet/aspnet:8.0 AS base +WORKDIR /app +EXPOSE 80 +EXPOSE 443 + +FROM mcr.microsoft.com/dotnet/sdk:8.0 AS build +WORKDIR /src +COPY . . 
+ +# ── Build the install-services-core.zip (normally done by CompressInstall.ps1 on Windows) ── +RUN apt-get update && apt-get install -y --no-install-recommends zip \ + && cd /src/nuget/snadmin \ + && mkdir -p install-services-core/import \ + && cp -r install-services/import/* install-services-core/import/ \ + && rm -rf install-services-core/import/'(apps)' \ + install-services-core/import/WebRoot \ + install-services-core/import/ErrorMessages \ + install-services-core/import/WebRoot.Content \ + install-services-core/import/ErrorMessages.Content \ + install-services-core/import/'(apps).Content' \ + && if [ -d install-services/importNetCore ]; then cp -r install-services/importNetCore/* install-services-core/import/ 2>/dev/null || true; fi \ + && cd install-services-core \ + && zip -r /src/Services.Core.Install/install-services-core.zip . \ + && echo "=== install-services-core.zip ===" && unzip -l /src/Services.Core.Install/install-services-core.zip | tail -1 + +WORKDIR "/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth" +RUN dotnet restore "SnWebApplication.Api.PostgreSql.TokenAuth.csproj" +RUN dotnet build "SnWebApplication.Api.PostgreSql.TokenAuth.csproj" -c Release -o /app/build + +FROM build AS publish +RUN dotnet publish "SnWebApplication.Api.PostgreSql.TokenAuth.csproj" -c Release -o /app/publish + +FROM base AS final +RUN apt-get update && apt-get install -y libgdiplus +WORKDIR /app +COPY --from=publish /app/publish . 
+ENTRYPOINT ["dotnet", "SnWebApplication.Api.PostgreSql.TokenAuth.dll"] diff --git a/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Program.cs b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Program.cs new file mode 100644 index 000000000..8ea10a651 --- /dev/null +++ b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Program.cs @@ -0,0 +1,32 @@ +using System; +using Microsoft.AspNetCore.Hosting; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using Serilog; + +namespace SnWebApplication.Api.PostgreSql.TokenAuth +{ + public class Program + { + public static void Main(string[] args) + { + // Enable legacy timestamp behavior for Npgsql 6+ + // This allows DateTime with Kind=UTC to be written to 'timestamp without time zone' columns. + AppContext.SetSwitch("Npgsql.EnableLegacyTimestampBehavior", true); + + CreateHostBuilder(args).Build().Run(); + } + + public static IHostBuilder CreateHostBuilder(string[] args) => + Host.CreateDefaultBuilder(args) + .ConfigureWebHostDefaults(webBuilder => + { + webBuilder.UseStartup() + .ConfigureLogging(loggingConfiguration => + loggingConfiguration.ClearProviders()); + }) + .UseSerilog((hostingContext, loggerConfiguration) => + loggerConfiguration.ReadFrom + .Configuration(hostingContext.Configuration)); + } +} diff --git a/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Properties/launchSettings.json b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Properties/launchSettings.json new file mode 100644 index 000000000..bb12091d8 --- /dev/null +++ b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Properties/launchSettings.json @@ -0,0 +1,42 @@ +{ + "$schema": "http://json.schemastore.org/launchsettings.json", + "profiles": { + "SnWebApplication.Api.PostgreSql.TokenAuth": { + "commandName": "Project", + "launchBrowser": true, + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development", + "ConnectionStrings__SensenetRepository": 
"Host=localhost;Database=sensenet-sndb;Username=postgres;Password=SuP3rS3CuR3P4sSw0Rd", + "sensenet__apikeys__healthcheckeruser": "supaS3CUp4ss", + "sensenet__authentication__repositoryUrl": "https://localhost:44362", + "sensenet__authentication__authority": "https://localhost:44311", + "sensenet__authentication__authServerType": "SNAuth", + "sensenet__authentication__AddJwtCookie": "false", + "sensenet__identityManagement__UserProfilesEnabled": "false", + "sensenet__install__postgres__EnableFirstInstallDB": "true" + }, + "applicationUrl": "https://localhost:44362" + }, + "Docker": { + "commandName": "Docker", + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development", + "ConnectionStrings__SensenetRepository": "Host=sensenet-postgres;Database=sensenet-sndb;Username=postgres;Password=SuP3rS3CuR3P4sSw0Rd", + "sensenet__apikeys__healthcheckeruser": "supaS3CUp4ss", + "sensenet__authentication__repositoryUrl": "https://localhost:44362", + "sensenet__authentication__authority": "https://localhost:44311", + "sensenet__authentication__authServerType": "SNAuth", + "sensenet__authentication__AddJwtCookie": "false", + "sensenet__identityManagement__UserProfilesEnabled": "false", + "sensenet__authentication__metadatahost": "http://sensenet-snis", + "sensenet__repository__Authentication__ApiKey": "pr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Tedpr3Gen3R4Ted", + "sensenet__install__postgres__EnableFirstInstallDB": "true" + }, + "launchBrowser": true, + "launchUrl": "{Scheme}://{ServiceHost}:{ServicePort}", + "publishAllPorts": true, + "useSSL": true, + "sslPort": 44362 + } + } +} diff --git a/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/SnWebApplication.Api.PostgreSql.TokenAuth.csproj b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/SnWebApplication.Api.PostgreSql.TokenAuth.csproj new file mode 100644 index 000000000..c61b83b32 --- /dev/null +++ 
b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/SnWebApplication.Api.PostgreSql.TokenAuth.csproj @@ -0,0 +1,36 @@ + + + + net8.0 + a3b5c7d9-1234-4ef0-abcd-567890abcdef + Linux + ..\.. + --network sensenet --name SnWebApplicationPgSql + true + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Startup.cs b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Startup.cs new file mode 100644 index 000000000..f2ed4a3ba --- /dev/null +++ b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/Startup.cs @@ -0,0 +1,186 @@ +using System; +using System.IdentityModel.Tokens.Jwt; +using System.IO; +using System.Linq; +using System.Security.Claims; +using Microsoft.AspNetCore.Authentication.JwtBearer; +using Microsoft.AspNetCore.Builder; +using Microsoft.AspNetCore.Hosting; +using Microsoft.AspNetCore.Http; +using Microsoft.Extensions.Configuration; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using Microsoft.IdentityModel.Tokens; +using SenseNet.Configuration; +using SenseNet.ContentRepository; +using SenseNet.ContentRepository.Security.ApiKeys; +using SenseNet.Diagnostics; +using SenseNet.Extensions.DependencyInjection; +using SenseNet.Search.Lucene29; +using SenseNet.Services.Core.Authentication; +using SenseNet.Storage.Data.PgSqlClient; + +namespace SnWebApplication.Api.PostgreSql.TokenAuth +{ + public class Startup + { + public Startup(IConfiguration configuration) + { + Configuration = configuration; + } + + public IConfiguration Configuration { get; } + + // This method gets called by the runtime. Use this method to add services to the container. 
+ public void ConfigureServices(IServiceCollection services) + { + services.AddRazorPages(); + + JwtSecurityTokenHandler.DefaultMapInboundClaims = false; + + // [sensenet]: Authentication + var authOptions = new AuthenticationOptions(); + Configuration.GetSection("sensenet:authentication").Bind(authOptions); + + services.AddAuthentication(JwtBearerDefaults.AuthenticationScheme) + .AddJwtBearer(options => + { + options.RequireHttpsMetadata = false; + options.SaveToken = true; + + if (authOptions.AuthServerType == AuthenticationServerType.SNAuth) + { + options.TokenValidationParameters = new TokenValidationParameters + { + ValidateIssuerSigningKey = false + }; + + var snAuthUrl = !string.IsNullOrEmpty(authOptions.MetadataHost) + ? authOptions.MetadataHost + : authOptions.Authority; + + options.TokenHandlers.Clear(); + options.TokenHandlers.Add(new SenseNetJwtSecurityTokenHandler( + $"{snAuthUrl}/api/auth/validate-token")); + } + else + { + options.Audience = "sensenet"; + + options.Authority = authOptions.Authority; + if (!string.IsNullOrWhiteSpace(authOptions.MetadataHost)) + options.MetadataAddress = + $"{authOptions.MetadataHost.AddUrlSchema().TrimEnd('/')}/.well-known/openid-configuration"; + } + }); + + // [sensenet]: Set options for ApiKeys + services.Configure(Configuration.GetSection("sensenet:ApiKeys")); + + // [sensenet]: Set options for PgSqlSecurityDataProvider + services.AddOptions() + .Configure>((securityOptions, systemConnections) => + securityOptions.ConnectionString = systemConnections.Value.Security); + + // [sensenet]: PostgreSQL connection string mapping + // The default ConfigureConnectionStrings reads "SnCrMsSql" from configuration. + // For PostgreSQL we read "SensenetRepository" (or fallback to "SnCrMsSql"). 
+ services.PostConfigure(options => + { + var pgConnStr = Configuration.GetConnectionString("SensenetRepository"); + if (!string.IsNullOrEmpty(pgConnStr)) + { + options.Repository = pgConnStr; + options.Security ??= pgConnStr; + + var section = Configuration.GetSection("ConnectionStrings"); + options.AllConnectionStrings = section.GetChildren() + .ToDictionary(x => x.Key, x => x.Value); + } + }); + + // [sensenet]: add sensenet services + services + .AddSenseNetInstallPackage() + .AddSenseNet(Configuration, (repositoryBuilder, provider) => + { + var searchEngineLogger = repositoryBuilder.Services.GetService>(); + repositoryBuilder + .UseLogger(provider) + .UseLucene29LocalSearchEngine(searchEngineLogger, + Path.Combine(Environment.CurrentDirectory, "App_Data", "LocalIndex")); + }) + .AddPgSqlSecurityDataProvider() + .AddSenseNetPgSqlProviders(configureInstallation: installOptions => + { + Configuration.Bind("sensenet:install:postgres", installOptions); + }) + .AddSenseNetOData() + .AddSenseNetWebHooks() + .AddSenseNetWopi() + .AddSenseNetSemanticKernel(options => + { + Configuration.Bind("sensenet:ai:SemanticKernel", options); + }) + .AddSenseNetAzureVision(options => + { + Configuration.Bind("sensenet:ai:AzureVision", options); + }); + + // [sensenet]: statistics overrides + var statOptions = new StatisticsOptions(); + Configuration.GetSection("sensenet:statistics").Bind(statOptions); + if (!statOptions.Enabled) + { + // reset to default/null services + services + .AddDefaultStatisticalDataProvider() + .AddDefaultStatisticalDataCollector(); + } + } + + // This method gets called by the runtime. Use this method to configure the HTTP request pipeline. 
+ public void Configure(IApplicationBuilder app, IWebHostEnvironment env) + { + if (env.IsDevelopment()) + { + app.UseDeveloperExceptionPage(); + } + + app.UseRouting(); + + // [sensenet]: custom CORS policy + app.UseSenseNetCors(); + // [sensenet]: use Authentication and set User.Current + app.UseSenseNetAuthentication(); + + // [sensenet]: MembershipExtender middleware + app.UseSenseNetMembershipExtenders(); + + app.UseAuthorization(); + + // [sensenet] Add the sensenet binary handler + app.UseSenseNetFiles(); + + // [sensenet]: Health middleware + app.UseSenseNetHealth(); + // [sensenet]: OData middleware + app.UseSenseNetOdata(); + // [sensenet]: WOPI middleware + app.UseSenseNetWopi(); + + app.UseEndpoints(endpoints => + { + endpoints.MapRazorPages(); + + endpoints.MapGet("/", async context => + { + await context.Response.WriteAsync("sensenet is listening. Visit https://sensenet.com for " + + "more information on how to call the REST API."); + }); + }); + } + } +} diff --git a/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/appsettings.Development.json b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/appsettings.Development.json new file mode 100644 index 000000000..7692412bb --- /dev/null +++ b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/appsettings.Development.json @@ -0,0 +1,14 @@ +{ + "Serilog": { + "WriteTo": [ + { "Name": "Console" }, + { + "Name": "File", + "Args": { + "path": "App_Data/Logs/log-.txt", + "rollingInterval": "Day" + } + } + ] + } +} diff --git a/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/appsettings.Production.json b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/appsettings.Production.json new file mode 100644 index 000000000..0520b9056 --- /dev/null +++ b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/appsettings.Production.json @@ -0,0 +1,16 @@ +{ + "Serilog": { + "WriteTo": [ + { "Name": "Console" }, + { + "Name": "Graylog", + "Args": { + "hostnameOrAddress": "", + "port": "12201", + 
"transportType": "Udp", + "restrictedToMinimumLevel": "Verbose" + } + } + ] + } +} diff --git a/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/appsettings.json b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/appsettings.json new file mode 100644 index 000000000..9826e5adc --- /dev/null +++ b/src/WebApps/SnWebApplication.Api.PostgreSql.TokenAuth/appsettings.json @@ -0,0 +1,42 @@ +{ + "ConnectionStrings": { + "SensenetRepository": "" + }, + "Serilog": { + "Using": [ "Serilog.Sinks.Console", "Serilog.Sinks.File", "Serilog.Sinks.Graylog" ], + "MinimumLevel": { + "Default": "Verbose", + "Override": { + "Microsoft": "Warning", + "System": "Warning" + } + }, + "WriteTo": [ + { "Name": "Console" } + ], + "Properties": { + "Application": "SnWebApplication.Api.PostgreSql.TokenAuth", + "Repository": "" + } + }, + "AllowedHosts": "*", + "sensenet": { + "authentication": { + "authServerType": "SNAuth", + "authority": "https://localhost:44311", + "repositoryUrl": "https://localhost:44362", + "AddJwtCookie": false + }, + "ApiKeys": { + "HealthCheckerUser": "" + }, + "Email": { + "Server": "", + "Port": 587 + }, + "Registration": { + "Groups": [], + "UserType": "" + } + } +} diff --git a/src/WebApps/SnWebApplication.Api.Sql.TokenAuth/Dockerfile b/src/WebApps/SnWebApplication.Api.Sql.TokenAuth/Dockerfile index bed68b138..15eed9674 100644 --- a/src/WebApps/SnWebApplication.Api.Sql.TokenAuth/Dockerfile +++ b/src/WebApps/SnWebApplication.Api.Sql.TokenAuth/Dockerfile @@ -8,6 +8,23 @@ EXPOSE 443 FROM mcr.microsoft.com/dotnet/sdk:8.0 AS build WORKDIR /src COPY . . 
+ +# ── Build the install-services-core.zip (normally done by CompressInstall.ps1 on Windows) ── +RUN apt-get update && apt-get install -y --no-install-recommends zip \ + && cd /src/nuget/snadmin \ + && mkdir -p install-services-core/import \ + && cp -r install-services/import/* install-services-core/import/ \ + && rm -rf install-services-core/import/'(apps)' \ + install-services-core/import/WebRoot \ + install-services-core/import/ErrorMessages \ + install-services-core/import/WebRoot.Content \ + install-services-core/import/ErrorMessages.Content \ + install-services-core/import/'(apps).Content' \ + && if [ -d install-services/importNetCore ]; then cp -r install-services/importNetCore/* install-services-core/import/ 2>/dev/null || true; fi \ + && cd install-services-core \ + && zip -r /src/Services.Core.Install/install-services-core.zip . \ + && echo "=== install-services-core.zip ===" && unzip -l /src/Services.Core.Install/install-services-core.zip | tail -1 + WORKDIR "/src/WebApps/SnWebApplication.Api.Sql.TokenAuth" RUN dotnet restore "SnWebApplication.Api.Sql.TokenAuth.csproj" RUN dotnet build "SnWebApplication.Api.Sql.TokenAuth.csproj" -c Release -o /app/build diff --git a/tools/SnBenchmark/BenchmarkEngine.cs b/tools/SnBenchmark/BenchmarkEngine.cs new file mode 100644 index 000000000..ca27f9787 --- /dev/null +++ b/tools/SnBenchmark/BenchmarkEngine.cs @@ -0,0 +1,143 @@ +namespace SnBenchmark; + +/// +/// The benchmark engine: manages worker tasks, ramp-up schedule, +/// and operation mix. Reports every result to the MetricsCollector. 
+/// +public sealed class BenchmarkEngine +{ + private readonly BenchmarkOptions _opts; + private readonly SenseNetClient _client; + private readonly MetricsCollector _metrics; + private readonly CancellationTokenSource _cts; + + private int _currentConcurrency; + private long _contentCounter; + private readonly List _workers = new(); + private readonly SemaphoreSlim _workerGate; + + public int CurrentConcurrency => _currentConcurrency; + public bool IsRunning { get; private set; } + + public BenchmarkEngine(BenchmarkOptions opts, SenseNetClient client, + MetricsCollector metrics, CancellationTokenSource cts) + { + _opts = opts; + _client = client; + _metrics = metrics; + _cts = cts; + _currentConcurrency = opts.InitialConcurrency; + _workerGate = new SemaphoreSlim(opts.InitialConcurrency, opts.MaxConcurrency); + } + + // ── Run ───────────────────────────────────────────────────────── + + public async Task RunAsync() + { + IsRunning = true; + var ct = _cts.Token; + + // Spawn initial workers + for (var i = 0; i < _opts.InitialConcurrency; i++) + _workers.Add(Task.Run(() => WorkerLoop(i, ct), ct)); + + // Ramp-up scheduler + var rampUpTask = Task.Run(() => RampUpLoop(ct), ct); + + // Duration limit (if configured) + if (_opts.TestDurationSeconds > 0) + { + _ = Task.Run(async () => + { + await Task.Delay(TimeSpan.FromSeconds(_opts.TestDurationSeconds), ct); + if (!ct.IsCancellationRequested) + await _cts.CancelAsync(); + }, ct); + } + + try + { + await rampUpTask; + } + catch (OperationCanceledException) { } + + // Wait for all workers to finish + try + { + await Task.WhenAll(_workers); + } + catch (OperationCanceledException) { } + + IsRunning = false; + } + + // ── Ramp-up logic ─────────────────────────────────────────────── + + private async Task RampUpLoop(CancellationToken ct) + { + var nextWorkerIndex = _opts.InitialConcurrency; + + while (!ct.IsCancellationRequested) + { + await Task.Delay(TimeSpan.FromSeconds(_opts.RampUpStepSeconds), ct); + + if 
(_currentConcurrency >= _opts.MaxConcurrency) continue; + + var toAdd = Math.Min( + _opts.RampUpConcurrencyStep, + _opts.MaxConcurrency - _currentConcurrency); + + for (var i = 0; i < toAdd; i++) + { + var idx = nextWorkerIndex++; + _workers.Add(Task.Run(() => WorkerLoop(idx, ct), ct)); + Interlocked.Increment(ref _currentConcurrency); + + // Release one permit on the semaphore for each new worker + try { _workerGate.Release(); } + catch (SemaphoreFullException) { /* at max */ } + } + } + } + + // ── Worker loop ───────────────────────────────────────────────── + + private async Task WorkerLoop(int workerIndex, CancellationToken ct) + { + var totalWeight = _opts.CreateWeight + _opts.QueryWeight; + + while (!ct.IsCancellationRequested) + { + try + { + // Pick operation type based on weight + var roll = Random.Shared.Next(totalWeight); + RequestResult result; + + if (roll < _opts.CreateWeight) + { + var seq = Interlocked.Increment(ref _contentCounter); + var name = $"bench-{workerIndex:D3}-{seq:D8}-{Guid.NewGuid():N}"[..40]; + result = await _client.CreateContentAsync(name, workerIndex, ct); + } + else + { + result = await _client.QueryContentAsync(workerIndex, ct); + } + + _metrics.Record(result); + + // Tiny jitter to avoid thundering herd + await Task.Delay(Random.Shared.Next(5, 50), ct); + } + catch (OperationCanceledException) + { + break; + } + catch + { + // Swallow individual errors; they're recorded in metrics + } + } + } +} diff --git a/tools/SnBenchmark/BenchmarkOptions.cs b/tools/SnBenchmark/BenchmarkOptions.cs new file mode 100644 index 000000000..3707b325b --- /dev/null +++ b/tools/SnBenchmark/BenchmarkOptions.cs @@ -0,0 +1,47 @@ +namespace SnBenchmark; + +/// +/// Benchmark configuration bound from appsettings.json "Benchmark" section. +/// All parameters can be overridden via command-line arguments (--Benchmark:Key=Value). 
/// <summary>
/// Strongly-typed benchmark settings, bound from the "Benchmark" section of
/// appsettings.json. Every value can also be supplied via environment
/// variables or command-line switches (--Benchmark:Key=Value).
/// </summary>
public sealed class BenchmarkOptions
{
    // ── Connection ──────────────────────────────────────────────────

    /// <summary>Base URL of the target sensenet repository.</summary>
    public string RepositoryUrl { get; set; } = "https://localhost:44362";

    /// <summary>API key sent with every request; must be supplied by the user.</summary>
    public string ApiKey { get; set; } = string.Empty;

    /// <summary>When true, TLS certificate errors are ignored (local dev certs).</summary>
    public bool SkipTlsValidation { get; set; } = true;

    // ── Content ─────────────────────────────────────────────────────

    /// <summary>Repository folder under which benchmark content is created.</summary>
    public string BasePath { get; set; } = "/Root/Content/Benchmark";

    /// <summary>sensenet content type used for created items.</summary>
    public string ContentTypeName { get; set; } = "File";

    // ── Concurrency & ramp-up ───────────────────────────────────────

    /// <summary>Number of concurrent workers at start-up.</summary>
    public int InitialConcurrency { get; set; } = 2;

    /// <summary>Upper limit on concurrent workers.</summary>
    public int MaxConcurrency { get; set; } = 64;

    /// <summary>Interval, in seconds, between ramp-up steps.</summary>
    public int RampUpStepSeconds { get; set; } = 10;

    /// <summary>Workers added at each ramp-up step.</summary>
    public int RampUpConcurrencyStep { get; set; } = 2;

    // ── Duration & timeout ──────────────────────────────────────────

    /// <summary>Total run time in seconds; 0 means run until Ctrl+C.</summary>
    public int TestDurationSeconds { get; set; } = 120;

    /// <summary>Timeout, in seconds, applied to each individual HTTP request.</summary>
    public int RequestTimeoutSeconds { get; set; } = 30;

    // ── Workload mix ────────────────────────────────────────────────

    /// <summary>Relative weight of CREATE operations in the mix (vs. Query).</summary>
    public int CreateWeight { get; set; } = 70;

    /// <summary>Relative weight of QUERY operations in the mix (vs. Create).</summary>
    public int QueryWeight { get; set; } = 30;

    // ── Reporting ───────────────────────────────────────────────────

    /// <summary>Directory where Markdown reports are written.</summary>
    public string ReportDirectory { get; set; } = "./reports";
}
+ /// + private static IRenderable BuildFullDashboard(MetricsSnapshot snap, LiveSample[] samples, + BenchmarkOptions opts) + { + var parts = new List(); + + // ── Metrics table + parts.Add(BuildMetricsTable(snap, opts)); + + // ── Charts section + if (samples.Length > 1) + { + parts.Add(new Text("")); + parts.Add(BuildChartsSection(samples, opts)); + } + + // ── Errors + if (snap.ErrorMessages.Count > 0) + { + parts.Add(new Text("")); + parts.Add(BuildErrorsPanel(snap)); + } + + // ── Load bar + parts.Add(new Text("")); + parts.Add(BuildLoadBar(snap, opts)); + + return new Rows(parts); + } + + // ── Metrics table ─────────────────────────────────────────────── + + private static Table BuildMetricsTable(MetricsSnapshot snap, BenchmarkOptions opts) + { + var t = new Table() + .Border(TableBorder.Double) + .BorderColor(Color.Cyan1) + .Title("[bold cyan]╔══ sensenet Benchmark ══╗[/]") + .Caption($"[dim]Press Ctrl+C to stop │ Target: {opts.RepositoryUrl}[/]") + .AddColumn(new TableColumn("[bold]Metric[/]").Width(24)) + .AddColumn(new TableColumn("[bold]Value[/]").Width(18)) + .AddColumn(new TableColumn("[bold]Metric[/]").Width(24)) + .AddColumn(new TableColumn("[bold]Value[/]").Width(18)); + + t.AddRow( + Lbl("⏱ Elapsed"), Val($"{snap.Elapsed:hh\\:mm\\:ss}"), + Lbl("👥 Concurrency"), ValHighlight($"{snap.CurrentConcurrency} / {opts.MaxConcurrency}")); + + t.AddRow(Rule("Throughput"), Empty(), Rule("Totals"), Empty()); + + t.AddRow( + Lbl("📝 Creates / min"), ValGood($"{snap.CreatesPerMinute:F1}"), + Lbl("📊 Total Requests"), Val($"{snap.TotalRequests:N0}")); + t.AddRow( + Lbl("🔍 Queries / min"), ValGood($"{snap.QueriesPerMinute:F1}"), + Lbl("✅ Successful"), ValGood($"{snap.TotalSuccessful:N0}")); + t.AddRow( + Lbl("⚡ Recent req/s"), ValHighlight($"{snap.RecentRequestsPerSec:F1}"), + Lbl("❌ Failed"), snap.TotalFailed > 0 ? 
ValBad($"{snap.TotalFailed:N0}") : Val("0")); + + t.AddRow(Rule("Latency (ms)"), Empty(), Rule("Recent (10s)"), Empty()); + + t.AddRow( + Lbl("📝 Avg Create"), LatencyColor(snap.AvgCreateMs), + Lbl("⚡ Create / sec"), ValHighlight($"{snap.RecentCreatesPerSec:F1}")); + t.AddRow( + Lbl("🔍 Avg Query"), LatencyColor(snap.AvgQueryMs), + Lbl("⚡ Query / sec"), ValHighlight($"{snap.RecentQueriesPerSec:F1}")); + t.AddRow( + Lbl("📝 P95 Create"), LatencyColor(snap.P95CreateMs), + Lbl("📝 Recent Avg Create"), LatencyColor(snap.RecentAvgCreateMs)); + t.AddRow( + Lbl("🔍 P95 Query"), LatencyColor(snap.P95QueryMs), + Lbl("🔍 Recent Avg Query"), LatencyColor(snap.RecentAvgQueryMs)); + t.AddRow( + Lbl("📝 P99 Create"), LatencyColor(snap.P99CreateMs), + Lbl("🔥 Recent Error %"), + snap.RecentErrorRate > 5 ? ValBad($"{snap.RecentErrorRate:F1}%") : ValGood($"{snap.RecentErrorRate:F1}%")); + t.AddRow( + Lbl("📝 Max Create"), LatencyColor(snap.MaxCreateMs), + Lbl("🔍 Max Query"), LatencyColor(snap.MaxQueryMs)); + + return t; + } + + // ── Charts section ────────────────────────────────────────────── + + private static IRenderable BuildChartsSection(LiveSample[] samples, BenchmarkOptions opts) + { + var tMax = samples.Max(s => s.RequestsPerSec); + var tCur = samples.Last().RequestsPerSec; + var lMax = samples.Max(s => s.AvgLatencyMs); + var lCur = samples.Last().AvgLatencyMs; + var eMax = samples.Max(s => s.ErrorCount); + var eCur = samples.Last().ErrorCount; + var cCur = samples.Last().Concurrency; + + // ── Row 1: Throughput + Latency + var throughputPanel = new Panel( + new Rows( + BuildSparkChart(samples.Select(s => (double)s.RequestsPerSec).ToArray(), "green", "darkgreen"), + new Markup($" [dim]now:[/] [bold green]{tCur}[/] [dim]peak:[/] [bold green]{tMax}[/]"))) + .Header("[bold green]📈 Throughput (req/s)[/]") + .Border(BoxBorder.Rounded) + .BorderColor(Color.Green) + .Expand(); + + var latencyPanel = new Panel( + new Rows( + BuildSparkChart(samples.Select(s => s.AvgLatencyMs).ToArray(), 
"yellow", "orange1"), + new Markup($" [dim]now:[/] [bold yellow]{lCur:F0}ms[/] [dim]peak:[/] [bold yellow]{lMax:F0}ms[/]"))) + .Header("[bold yellow]🕐 Latency (ms)[/]") + .Border(BoxBorder.Rounded) + .BorderColor(Color.Yellow) + .Expand(); + + // ── Row 2: Errors + Concurrency + var errorPanel = new Panel( + new Rows( + BuildSparkChart(samples.Select(s => (double)s.ErrorCount).ToArray(), "red", "darkred"), + new Markup($" [dim]now:[/] [bold red]{eCur}[/] [dim]peak:[/] [bold red]{eMax}[/]"))) + .Header("[bold red]❌ Errors/s[/]") + .Border(BoxBorder.Rounded) + .BorderColor(Color.Red) + .Expand(); + + var concurrencyPanel = new Panel( + new Rows( + BuildSparkChart(samples.Select(s => (double)s.Concurrency).ToArray(), "magenta1", "purple"), + new Markup($" [dim]now:[/] [bold magenta1]{cCur}[/] [dim]max:[/] [bold magenta1]{opts.MaxConcurrency}[/]"))) + .Header("[bold magenta1]👥 Concurrency[/]") + .Border(BoxBorder.Rounded) + .BorderColor(Color.Magenta1) + .Expand(); + + var chartsGrid = new Grid() + .AddColumn(new GridColumn().NoWrap()) + .AddColumn(new GridColumn().NoWrap()); + + chartsGrid.AddRow(throughputPanel, latencyPanel); + chartsGrid.AddRow(errorPanel, concurrencyPanel); + + return chartsGrid; + } + + // ── Errors panel ──────────────────────────────────────────────── + + private static Panel BuildErrorsPanel(MetricsSnapshot snap) + { + var rows = new List(); + foreach (var (msg, count) in snap.ErrorMessages) + { + rows.Add(new Markup($" [red]{Markup.Escape(msg)}[/] [bold red]×{count}[/]")); + } + + return new Panel(new Rows(rows)) + .Header("[bold red]⚠ Error Details[/]") + .Border(BoxBorder.Rounded) + .BorderColor(Color.Red); + } + + // ── Load bar ──────────────────────────────────────────────────── + + private static IRenderable BuildLoadBar(MetricsSnapshot snap, BenchmarkOptions opts) + { + var pct = (double)snap.CurrentConcurrency / opts.MaxConcurrency * 100; + var barLen = 40; + var filled = (int)(pct / 100 * barLen); + var bar = new string('█', filled) 
+ new string('░', barLen - filled); + var barColor = pct < 50 ? "green" : pct < 80 ? "yellow" : "red"; + + return new Markup($" [bold]🔥 Load:[/] [{barColor}]{bar}[/] [{barColor} bold]{pct:F0}%[/] [{barColor}]{snap.CurrentConcurrency}/{opts.MaxConcurrency} workers[/]"); + } + + // ── Spark chart builder ───────────────────────────────────────── + + private static Markup BuildSparkChart(double[] data, string mainColor, string dimColor) + { + if (data.Length == 0) + return new Markup("[dim]waiting for data…[/]"); + + var resampled = Resample(data, ChartWidth); + var maxVal = resampled.Max(); + if (maxVal < 0.001) maxVal = 1; + + var blocks = new[] { ' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█' }; + var lines = new string[ChartHeight]; + + for (var row = ChartHeight - 1; row >= 0; row--) + { + var chars = new char[resampled.Length]; + for (var col = 0; col < resampled.Length; col++) + { + var normalized = resampled[col] / maxVal; + var totalLevels = ChartHeight * 8; + var level = (int)(normalized * totalLevels); + var rowBase = row * 8; + var inRow = level - rowBase; + + chars[col] = inRow <= 0 ? ' ' : inRow >= 8 ? '█' : blocks[inRow]; + } + lines[ChartHeight - 1 - row] = new string(chars); + } + + var sb = new System.Text.StringBuilder(); + for (var i = 0; i < lines.Length; i++) + { + var escaped = Markup.Escape(lines[i]); + var color = i < 3 ? 
mainColor : dimColor; + sb.Append($"[{color}]{escaped}[/]"); + if (i < lines.Length - 1) sb.AppendLine(); + } + + return new Markup(sb.ToString()); + } + + // ── Helpers ────────────────────────────────────────────────────── + + private static double[] Resample(double[] data, int targetLen) + { + if (data.Length == 0) return new double[targetLen]; + if (data.Length <= targetLen) + { + var padded = new double[targetLen]; + var offset = targetLen - data.Length; + Array.Copy(data, 0, padded, offset, data.Length); + return padded; + } + + var result = new double[targetLen]; + var ratio = (double)data.Length / targetLen; + for (var i = 0; i < targetLen; i++) + { + var start = (int)(i * ratio); + var end = (int)((i + 1) * ratio); + if (end > data.Length) end = data.Length; + if (start >= end) { result[i] = data[start]; continue; } + result[i] = 0; + for (var j = start; j < end; j++) result[i] += data[j]; + result[i] /= (end - start); + } + return result; + } + + private static Markup Lbl(string t) => new($"[bold]{Markup.Escape(t)}[/]"); + private static Markup Val(string t) => new($"[white]{Markup.Escape(t)}[/]"); + private static Markup ValGood(string t) => new($"[bold green]{Markup.Escape(t)}[/]"); + private static Markup ValBad(string t) => new($"[bold red]{Markup.Escape(t)}[/]"); + private static Markup ValHighlight(string t) => new($"[bold yellow]{Markup.Escape(t)}[/]"); + private static IRenderable Empty() => new Text(""); + private static IRenderable Rule(string label) => + new Spectre.Console.Rule($"[dim]{Markup.Escape(label)}[/]").RuleStyle("grey"); + + private static Markup LatencyColor(double ms) => ms switch + { + 0 => new("[dim]—[/]"), + < 100 => new($"[bold green]{ms:F0} ms[/]"), + < 500 => new($"[bold yellow]{ms:F0} ms[/]"), + < 2000 => new($"[bold orange1]{ms:F0} ms[/]"), + _ => new($"[bold red]{ms:F0} ms[/]") + }; + + // ── Banner ────────────────────────────────────────────────────── + + public static void PrintBanner(BenchmarkOptions opts) + { + 
AnsiConsole.Write(new FigletText("SnBenchmark") + .Color(Color.Cyan1).Centered()); + + AnsiConsole.Write(new Spectre.Console.Rule("[cyan]sensenet Repository Stress Test Tool[/]") + .RuleStyle("cyan").DoubleBorder()); + + var configPanel = new Panel( + new Rows( + new Markup($"[bold]Repository:[/] [cyan]{Markup.Escape(opts.RepositoryUrl)}[/]"), + new Markup($"[bold]Base path:[/] [cyan]{Markup.Escape(opts.BasePath)}[/]"), + new Markup($"[bold]Content type:[/] [cyan]{Markup.Escape(opts.ContentTypeName)}[/]"), + new Markup($"[bold]Concurrency:[/] [yellow]{opts.InitialConcurrency}[/] → [yellow]{opts.MaxConcurrency}[/] (step: [yellow]+{opts.RampUpConcurrencyStep}[/] every [yellow]{opts.RampUpStepSeconds}s[/])"), + new Markup($"[bold]Duration:[/] [yellow]{(opts.TestDurationSeconds > 0 ? $"{opts.TestDurationSeconds}s" : "unlimited (Ctrl+C)")}[/]"), + new Markup($"[bold]Mix:[/] Create [yellow]{opts.CreateWeight}%[/] / Query [yellow]{opts.QueryWeight}%[/]"), + new Markup($"[bold]Timeout:[/] [yellow]{opts.RequestTimeoutSeconds}s[/] per request") + )) + .Header("[bold cyan]⚙ Configuration[/]") + .Border(BoxBorder.Rounded) + .BorderColor(Color.Grey); + + AnsiConsole.Write(configPanel); + AnsiConsole.WriteLine(); + } + + // ── Final summary ─────────────────────────────────────────────── + + public static void PrintFinalSummary(MetricsSnapshot snap) + { + AnsiConsole.WriteLine(); + AnsiConsole.Write(new Spectre.Console.Rule("[bold green]✔ Benchmark Complete[/]") + .RuleStyle("green").DoubleBorder()); + + var st = new Table() + .Border(TableBorder.Rounded) + .BorderColor(Color.Green) + .AddColumn("[bold]Metric[/]") + .AddColumn("[bold]Value[/]"); + + st.AddRow("Duration", $"{snap.Elapsed:hh\\:mm\\:ss}"); + st.AddRow("Peak Concurrency", $"{snap.CurrentConcurrency}"); + st.AddRow("Total Requests", $"{snap.TotalRequests:N0}"); + st.AddRow("Successful", $"[green]{snap.TotalSuccessful:N0}[/]"); + st.AddRow("Failed", snap.TotalFailed > 0 ? 
$"[red]{snap.TotalFailed:N0}[/]" : "[green]0[/]"); + st.AddRow("Creates / min", $"{snap.CreatesPerMinute:F1}"); + st.AddRow("Queries / min", $"{snap.QueriesPerMinute:F1}"); + st.AddRow("Avg Create Latency", $"{snap.AvgCreateMs:F0} ms"); + st.AddRow("Avg Query Latency", $"{snap.AvgQueryMs:F0} ms"); + st.AddRow("P95 Create Latency", $"{snap.P95CreateMs:F0} ms"); + st.AddRow("P99 Create Latency", $"{snap.P99CreateMs:F0} ms"); + st.AddRow("Max Create Latency", $"{snap.MaxCreateMs:F0} ms"); + + if (snap.ErrorMessages.Count > 0) + { + st.AddEmptyRow(); + foreach (var (msg, count) in snap.ErrorMessages) + st.AddRow($"[red]Error[/]", $"[red]{Markup.Escape(msg)} (×{count})[/]"); + } + + AnsiConsole.Write(st); + } +} diff --git a/tools/SnBenchmark/MetricsCollector.cs b/tools/SnBenchmark/MetricsCollector.cs new file mode 100644 index 000000000..5522f6777 --- /dev/null +++ b/tools/SnBenchmark/MetricsCollector.cs @@ -0,0 +1,231 @@ +using System.Collections.Concurrent; + +namespace SnBenchmark; + +/// +/// Thread-safe metrics collector. Every worker pushes results here; +/// the UI reads aggregated snapshots. +/// +public sealed class MetricsCollector +{ + private readonly ConcurrentBag _results = new(); + private long _totalRequests; + private long _totalSuccessful; + private long _totalFailed; + private long _totalCreates; + private long _totalQueries; + private readonly DateTime _startTime = DateTime.UtcNow; + + // ── Rolling history for live charts (1 sample per second) ─────── + private readonly ConcurrentQueue _liveSamples = new(); + private const int MaxLiveSamples = 120; // 2 minutes of history + + /// + /// Called once per second from the dashboard loop to record a live sample. 
+ /// + public void RecordLiveSample(int concurrency) + { + var now = DateTime.UtcNow; + var window = now.AddSeconds(-1); + var results = _results.ToArray(); + var recent = results.Where(r => r.Timestamp > window).ToArray(); + + _liveSamples.Enqueue(new LiveSample + { + Timestamp = now, + RequestsPerSec = recent.Length, + AvgLatencyMs = recent.Length > 0 ? recent.Average(r => r.ElapsedMs) : 0, + ErrorCount = recent.Count(r => !r.Success), + Concurrency = concurrency, + CreatesPerSec = recent.Count(r => r.Operation == OperationType.Create), + QueriesPerSec = recent.Count(r => r.Operation == OperationType.Query), + }); + + while (_liveSamples.Count > MaxLiveSamples) + _liveSamples.TryDequeue(out _); + } + + public LiveSample[] GetLiveSamples() => _liveSamples.ToArray(); + + // ── Record ────────────────────────────────────────────────────── + + public void Record(RequestResult result) + { + _results.Add(result); + Interlocked.Increment(ref _totalRequests); + + if (result.Success) + Interlocked.Increment(ref _totalSuccessful); + else + Interlocked.Increment(ref _totalFailed); + + if (result.Operation == OperationType.Create) + Interlocked.Increment(ref _totalCreates); + else + Interlocked.Increment(ref _totalQueries); + } + + // ── Snapshots ─────────────────────────────────────────────────── + + public MetricsSnapshot GetSnapshot(int currentConcurrency) + { + var elapsed = DateTime.UtcNow - _startTime; + var results = _results.ToArray(); + + var createResults = results.Where(r => r.Operation == OperationType.Create).ToArray(); + var queryResults = results.Where(r => r.Operation == OperationType.Query).ToArray(); + + var recentWindow = DateTime.UtcNow.AddSeconds(-10); + var recentResults = results.Where(r => r.Timestamp > recentWindow).ToArray(); + var recentCreates = recentResults.Where(r => r.Operation == OperationType.Create).ToArray(); + var recentQueries = recentResults.Where(r => r.Operation == OperationType.Query).ToArray(); + + return new MetricsSnapshot + { + 
Elapsed = elapsed, + TotalRequests = Interlocked.Read(ref _totalRequests), + TotalSuccessful = Interlocked.Read(ref _totalSuccessful), + TotalFailed = Interlocked.Read(ref _totalFailed), + TotalCreates = Interlocked.Read(ref _totalCreates), + TotalQueries = Interlocked.Read(ref _totalQueries), + CurrentConcurrency = currentConcurrency, + + AvgCreateMs = createResults.Length > 0 + ? createResults.Average(r => r.ElapsedMs) : 0, + AvgQueryMs = queryResults.Length > 0 + ? queryResults.Average(r => r.ElapsedMs) : 0, + P95CreateMs = Percentile(createResults, 0.95), + P95QueryMs = Percentile(queryResults, 0.95), + P99CreateMs = Percentile(createResults, 0.99), + P99QueryMs = Percentile(queryResults, 0.99), + MaxCreateMs = createResults.Length > 0 + ? createResults.Max(r => r.ElapsedMs) : 0, + MaxQueryMs = queryResults.Length > 0 + ? queryResults.Max(r => r.ElapsedMs) : 0, + + CreatesPerMinute = elapsed.TotalMinutes > 0 + ? Interlocked.Read(ref _totalCreates) / elapsed.TotalMinutes : 0, + QueriesPerMinute = elapsed.TotalMinutes > 0 + ? Interlocked.Read(ref _totalQueries) / elapsed.TotalMinutes : 0, + + RecentRequestsPerSec = recentResults.Length / 10.0, + RecentCreatesPerSec = recentCreates.Length / 10.0, + RecentQueriesPerSec = recentQueries.Length / 10.0, + RecentAvgCreateMs = recentCreates.Length > 0 + ? recentCreates.Average(r => r.ElapsedMs) : 0, + RecentAvgQueryMs = recentQueries.Length > 0 + ? recentQueries.Average(r => r.ElapsedMs) : 0, + RecentErrorRate = recentResults.Length > 0 + ? 
(double)recentResults.Count(r => !r.Success) / recentResults.Length * 100 : 0, + + ErrorMessages = results + .Where(r => !r.Success && r.ErrorMessage != null) + .GroupBy(r => TruncateError(r.ErrorMessage!)) + .OrderByDescending(g => g.Count()) + .Take(5) + .ToDictionary(g => g.Key, g => g.Count()), + }; + } + + public IReadOnlyList GetAllResults() => _results.ToArray(); + + // ── Time-series for report ────────────────────────────────────── + + public IReadOnlyList GetTimeSlices(int intervalSeconds = 5) + { + var results = _results.ToArray(); + if (results.Length == 0) return []; + + var minTime = results.Min(r => r.Timestamp); + var maxTime = results.Max(r => r.Timestamp); + var slices = new List(); + + for (var t = minTime; t < maxTime; t = t.AddSeconds(intervalSeconds)) + { + var windowEnd = t.AddSeconds(intervalSeconds); + var window = results.Where(r => r.Timestamp >= t && r.Timestamp < windowEnd).ToArray(); + + slices.Add(new TimeSlice + { + OffsetSeconds = (t - minTime).TotalSeconds, + RequestCount = window.Length, + SuccessCount = window.Count(r => r.Success), + FailCount = window.Count(r => !r.Success), + AvgLatencyMs = window.Length > 0 ? window.Average(r => r.ElapsedMs) : 0, + CreateCount = window.Count(r => r.Operation == OperationType.Create), + QueryCount = window.Count(r => r.Operation == OperationType.Query), + }); + } + + return slices; + } + + // ── Helpers ────────────────────────────────────────────────────── + + private static double Percentile(RequestResult[] sorted, double p) + { + if (sorted.Length == 0) return 0; + var ordered = sorted.OrderBy(r => r.ElapsedMs).ToArray(); + var index = (int)Math.Ceiling(p * ordered.Length) - 1; + return ordered[Math.Max(0, index)].ElapsedMs; + } + + private static string TruncateError(string msg) => + msg.Length > 80 ? 
// ── Snapshot DTO ──────────────────────────────────────────────────────

/// <summary>
/// Immutable aggregate of all metrics at one point in time; consumed by the
/// live dashboard and the final report.
/// </summary>
public sealed class MetricsSnapshot
{
    // Overall counters since the run started.
    public TimeSpan Elapsed { get; init; }
    public long TotalRequests { get; init; }
    public long TotalSuccessful { get; init; }
    public long TotalFailed { get; init; }
    public long TotalCreates { get; init; }
    public long TotalQueries { get; init; }
    public int CurrentConcurrency { get; init; }

    // Latency statistics in milliseconds, split by operation type.
    public double AvgCreateMs { get; init; }
    public double AvgQueryMs { get; init; }
    public double P95CreateMs { get; init; }
    public double P95QueryMs { get; init; }
    public double P99CreateMs { get; init; }
    public double P99QueryMs { get; init; }
    public double MaxCreateMs { get; init; }
    public double MaxQueryMs { get; init; }

    // Whole-run throughput.
    public double CreatesPerMinute { get; init; }
    public double QueriesPerMinute { get; init; }

    // Trailing-10-second window.
    public double RecentRequestsPerSec { get; init; }
    public double RecentCreatesPerSec { get; init; }
    public double RecentQueriesPerSec { get; init; }
    public double RecentAvgCreateMs { get; init; }
    public double RecentAvgQueryMs { get; init; }
    public double RecentErrorRate { get; init; }

    // Top error messages (truncated) mapped to their occurrence counts.
    public Dictionary<string, int> ErrorMessages { get; init; } = new();
}

/// <summary>One fixed-width bucket of results, used for report time-series.</summary>
public sealed class TimeSlice
{
    public double OffsetSeconds { get; init; }
    public int RequestCount { get; init; }
    public int SuccessCount { get; init; }
    public int FailCount { get; init; }
    public double AvgLatencyMs { get; init; }
    public int CreateCount { get; init; }
    public int QueryCount { get; init; }
}

/// <summary>One per-second sample backing the dashboard sparkline charts.</summary>
public sealed class LiveSample
{
    public DateTime Timestamp { get; init; }
    public int RequestsPerSec { get; init; }
    public double AvgLatencyMs { get; init; }
    public int ErrorCount { get; init; }
    public int Concurrency { get; init; }
    public int CreatesPerSec { get; init; }
    public int QueriesPerSec { get; init; }
}
using Microsoft.Extensions.Configuration;
using Spectre.Console;
using SnBenchmark;

// ── Configuration ───────────────────────────────────────────────────
// Later sources override earlier ones: appsettings.json < env vars < CLI.
var config = new ConfigurationBuilder()
    .SetBasePath(Directory.GetCurrentDirectory())
    .AddJsonFile("appsettings.json", optional: true)
    .AddEnvironmentVariables("SNBENCH_")
    .AddCommandLine(args)
    .Build();

var opts = new BenchmarkOptions();
config.GetSection("Benchmark").Bind(opts);

// ── Validate ────────────────────────────────────────────────────────

if (string.IsNullOrWhiteSpace(opts.ApiKey))
{
    AnsiConsole.MarkupLine("[bold red]Error:[/] API key is required.");
    AnsiConsole.MarkupLine("[dim]Set it in appsettings.json, via --Benchmark:ApiKey=xxx, or SNBENCH_Benchmark__ApiKey env var.[/]");
    return 1;
}

if (string.IsNullOrWhiteSpace(opts.RepositoryUrl))
{
    AnsiConsole.MarkupLine("[bold red]Error:[/] Repository URL is required.");
    return 1;
}

// ── Banner ──────────────────────────────────────────────────────────

ConsoleUi.PrintBanner(opts);

// ── Ctrl+C handler ──────────────────────────────────────────────────

using var cts = new CancellationTokenSource();
Console.CancelKeyPress += (_, e) =>
{
    e.Cancel = true; // keep the process alive so shutdown can finish cleanly
    AnsiConsole.MarkupLine("\n[bold yellow]⚠ Stopping benchmark gracefully…[/]");
    cts.Cancel();
};

// ── Pre-flight checks ───────────────────────────────────────────────

using var client = new SenseNetClient(opts);

var reachable = await AnsiConsole.Status()
    .Spinner(Spinner.Known.Dots)
    .StartAsync("[cyan]Checking repository connectivity…[/]",
        async _ => await client.IsAliveAsync(cts.Token));

if (!reachable)
{
    AnsiConsole.MarkupLine($"[bold red]✗ Cannot reach repository at {Markup.Escape(opts.RepositoryUrl)}[/]");
    AnsiConsole.MarkupLine("[dim]Check the URL, network, and TLS settings.[/]");
    return 2;
}

AnsiConsole.MarkupLine("[bold green]✓ Repository is reachable[/]");

// ── Ensure base folder ──────────────────────────────────────────────

try
{
    await AnsiConsole.Status()
        .Spinner(Spinner.Known.Dots)
        .StartAsync($"[cyan]Ensuring benchmark folder {Markup.Escape(opts.BasePath)}…[/]",
            async _ => await client.EnsureBaseFolderAsync(cts.Token));

    AnsiConsole.MarkupLine("[bold green]✓ Benchmark folder ready[/]");
}
catch (Exception ex)
{
    AnsiConsole.MarkupLine($"[bold red]✗ Failed to create benchmark folder: {Markup.Escape(ex.Message)}[/]");
    return 3;
}

AnsiConsole.WriteLine();
AnsiConsole.MarkupLine("[bold cyan]🚀 Starting benchmark…[/]");
AnsiConsole.MarkupLine("[dim]Press Ctrl+C to stop at any time.[/]");
AnsiConsole.WriteLine();

// ── Run benchmark ───────────────────────────────────────────────────

var metrics = new MetricsCollector();
var engine = new BenchmarkEngine(opts, client, metrics, cts);

// Engine runs in the background while the dashboard owns the console.
var engineTask = Task.Run(() => engine.RunAsync(), cts.Token);

// Live dashboard blocks until the run ends or the token is cancelled.
await ConsoleUi.RunDashboardAsync(engine, metrics, opts, cts.Token);

try { await engineTask; }
catch (OperationCanceledException) { /* expected on Ctrl+C */ }

// ── Final summary ───────────────────────────────────────────────────

var summary = metrics.GetSnapshot(engine.CurrentConcurrency);
ConsoleUi.PrintFinalSummary(summary);

// ── Generate report ─────────────────────────────────────────────────

try
{
    var reportPath = await ReportGenerator.GenerateAsync(opts, metrics, engine.CurrentConcurrency);
    AnsiConsole.WriteLine();
    AnsiConsole.MarkupLine($"[bold green]📄 Report saved:[/] [link]{Markup.Escape(reportPath)}[/]");
}
catch (Exception ex)
{
    AnsiConsole.MarkupLine($"[bold red]⚠ Failed to generate report: {Markup.Escape(ex.Message)}[/]");
}

AnsiConsole.WriteLine();
AnsiConsole.MarkupLine("[bold cyan]Done. 👋[/]");
return 0;
| + +--- + +## Prerequisites + +- [.NET 10 SDK](https://dotnet.microsoft.com/download) (or later) +- A running sensenet repository with the OData API enabled +- A valid API key with content creation permissions + +--- + +## Quick Start + +```bash +cd tools/SnBenchmark + +# 1. Edit settings (at minimum set the API key) +# Or pass it on the command line (see below) +nano appsettings.json + +# 2. Run +dotnet run +``` + +Alternatively, pass settings via the command line: + +```bash +dotnet run -- \ + --Benchmark:RepositoryUrl=https://localhost:44362 \ + --Benchmark:ApiKey=YOUR_API_KEY_HERE \ + --Benchmark:MaxConcurrency=128 \ + --Benchmark:TestDurationSeconds=300 +``` + +Or via environment variables (prefix `SNBENCH_`, double underscore for nesting): + +```bash +export SNBENCH_Benchmark__ApiKey=YOUR_API_KEY_HERE +export SNBENCH_Benchmark__MaxConcurrency=128 +dotnet run +``` + +--- + +## Configuration Reference + +All settings live under the `"Benchmark"` section in `appsettings.json`. + +### Connection + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `RepositoryUrl` | string | `https://localhost:44362` | Base URL of the sensenet repository. | +| `ApiKey` | string | *(required)* | API key sent as the `apikey` header on every request. | +| `SkipTlsValidation` | bool | `true` | Ignore TLS certificate errors (useful for local dev with self-signed certs). | + +### Content + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `BasePath` | string | `/Root/Content/Benchmark` | Repository path where benchmark content is created. The folder is auto-created if it doesn't exist. | +| `ContentTypeName` | string | `File` | sensenet content type to create. | + +### Concurrency & Ramp-Up + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `InitialConcurrency` | int | `2` | Number of concurrent workers at start. | +| `MaxConcurrency` | int | `64` | Upper ceiling for concurrent workers. 
| +| `RampUpStepSeconds` | int | `10` | Seconds between each ramp-up step. | +| `RampUpConcurrencyStep` | int | `2` | Number of workers added per ramp-up step. | + +> **Example:** With defaults, concurrency goes 2 → 4 → 6 → … → 64, stepping +> every 10 seconds. It takes `(64-2)/2 × 10 = 310s` (~5 min) to reach the +> maximum. + +### Duration & Timeout + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `TestDurationSeconds` | int | `120` | Total test duration in seconds. Set to `0` for unlimited (stop manually with Ctrl+C). | +| `RequestTimeoutSeconds` | int | `30` | HTTP timeout per individual request. Requests exceeding this are cancelled and counted as errors. | + +### Workload Mix + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `CreateWeight` | int | `70` | Relative weight of content creation operations. | +| `QueryWeight` | int | `30` | Relative weight of OData query operations. | + +> The weights don't need to sum to 100 — they are treated as proportions. +> `70 / 30` is the same as `7 / 3`. + +### Reporting + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `ReportDirectory` | string | `./reports` | Directory where Markdown reports are saved. Created automatically if missing. | + +--- + +## How It Works + +``` +┌─────────────┐ +│ Program │ Loads config, validates, runs pre-flight checks +└──────┬──────┘ + │ + ▼ +┌─────────────┐ ┌──────────────────┐ +│ Benchmark │────▶│ SenseNetClient │ HTTP calls to OData API +│ Engine │ └──────────────────┘ +│ │ POST /OData.svc/('BasePath') +│ Workers ×N │ GET /OData.svc/('BasePath')?$filter=... 
+└──────┬──────┘ + │ records timing + ▼ +┌─────────────┐ ┌──────────────────┐ +│ Metrics │────▶│ ConsoleUi │ Live Spectre.Console dashboard +│ Collector │ └──────────────────┘ +│ │ +│ (thread- │ ┌──────────────────┐ +│ safe) │────▶│ ReportGenerator │ Markdown report with charts +└─────────────┘ └──────────────────┘ +``` + +### Execution Flow + +1. **Configuration** — Merges `appsettings.json` + env vars + CLI args. +2. **Validation** — Checks that `ApiKey` and `RepositoryUrl` are set. +3. **Pre-flight** — Pings the repository; ensures the benchmark folder exists. +4. **Engine start** — Spawns `InitialConcurrency` workers. Each worker loops: + pick a random operation (create or query based on weights), execute it, record + timing in `MetricsCollector`, repeat. +5. **Ramp-up** — A background timer adds `RampUpConcurrencyStep` workers every + `RampUpStepSeconds` until `MaxConcurrency` is reached. +6. **Dashboard** — Updates every second with real-time metrics and sparkline + charts. +7. **Shutdown** — On duration expiry or Ctrl+C: cancels workers, prints a final + summary table, and writes the Markdown report to `ReportDirectory`. + +### Metrics Tracked + +- **Throughput:** total creates/min, queries/min, recent req/s +- **Latency:** average, P95, P99, max — separately for creates and queries +- **Rolling window:** last 10 seconds of creates/sec, queries/sec, avg latency, + error rate +- **Live samples:** up to 120 seconds of per-second snapshots for chart + rendering +- **Errors:** count + grouped error messages + +--- + +## Live Dashboard + +The dashboard uses [Spectre.Console](https://spectreconsole.net) `Live` rendering +and updates every second: + +``` +╔════════════════════════ sensenet Benchmark ═════════════════════════╗ +║ Metric │ Value │ Metric │ Value ║ +║─────────────────────┼─────────────┼──────────────────────┼─────────║ +║ ⏱ Elapsed │ 00:01:23 │ 👥 Concurrency │ 18 / 64 ║ +║ 📝 Creates / min │ 2,284.7 │ 📊 Total Requests │ 4,123 ║ +║ ... │ │ ... 
│ ║ +╚════════════════════════════════════════════════════════════════════╝ + +╭─📈 Throughput (req/s)────────────╮ ╭─🕐 Latency (ms)─────────────────╮ +│ ▁▂▃▃▄▅▅▆▆▇▇▇████████████████ │ │ █▇▆▅▅▄▄▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃ │ +│ now: 42 peak: 48 │ │ now: 189ms peak: 488ms │ +╰──────────────────────────────────╯ ╰─────────────────────────────────╯ +╭─❌ Errors/s─────────────────────╮ ╭─👥 Concurrency─────────────────╮ +│ ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁ │ │ ▁▁▂▂▃▃▄▄▅▅▆▆▇▇████████████ │ +│ now: 0 peak: 1 │ │ now: 18 max: 64 │ +╰──────────────────────────────────╯ ╰─────────────────────────────────╯ + + 🔥 Load: █████████████░░░░░░░░░░░░░░░░░░░ 28% 18/64 workers +``` + +--- + +## Report + +After each run a Markdown report is written to the `ReportDirectory`: + +``` +reports/ + benchmark-report-20260228-155235.md +``` + +The report contains: + +- Configuration summary +- Final metrics table +- ASCII throughput and latency time-series charts +- Latency distribution histogram +- Per-second time-series data table +- Error summary + +--- + +## Project Structure + +``` +tools/SnBenchmark/ +├── Program.cs Entry point, config loading, pre-flight, main loop +├── BenchmarkOptions.cs Strongly-typed configuration model +├── BenchmarkEngine.cs Worker management, ramp-up scheduler, operation mix +├── SenseNetClient.cs OData HTTP client (create, query, health check) +├── MetricsCollector.cs Thread-safe metrics aggregation & live samples +├── ConsoleUi.cs Spectre.Console live dashboard with sparkline charts +├── ReportGenerator.cs Markdown report writer with ASCII charts +├── appsettings.json Default configuration +├── SnBenchmark.csproj Project file (.NET 10, Spectre.Console) +└── reports/ Generated reports (git-ignored) +``` + +--- + +## Example Scenarios + +### Light smoke test (30 seconds, low concurrency) + +```bash +dotnet run -- \ + --Benchmark:MaxConcurrency=4 \ + --Benchmark:TestDurationSeconds=30 +``` + +### Aggressive stress test (5 min, 128 workers, fast ramp) + +```bash +dotnet run -- \ + 
--Benchmark:MaxConcurrency=128 \ + --Benchmark:RampUpStepSeconds=5 \ + --Benchmark:RampUpConcurrencyStep=4 \ + --Benchmark:TestDurationSeconds=300 +``` + +### Query-heavy workload + +```bash +dotnet run -- \ + --Benchmark:CreateWeight=20 \ + --Benchmark:QueryWeight=80 +``` + +### Unlimited duration (manual stop) + +```bash +dotnet run -- --Benchmark:TestDurationSeconds=0 +# Press Ctrl+C when done +``` + +--- + +## Interpreting Results + +| Metric | What to look for | +|--------|------------------| +| **Creates/min** | Higher is better. Watch for plateau or drop as concurrency increases — indicates a bottleneck. | +| **Avg Latency** | Should stay stable as load increases. A steep rise signals saturation. | +| **P95 / P99** | Tail latency. If P99 >> Avg, there's occasional contention (DB locks, GC pauses, etc.). | +| **Error rate** | Should be 0% during the run. `The operation was canceled` errors at shutdown are normal and harmless. | +| **Throughput chart** | Ideally rises linearly with concurrency, then plateaus. A _drop_ means the system is overloaded. | +| **Latency chart** | Inverse of throughput: should stay flat, then rise. A hockey-stick shape = saturation point found. | + +### Common Bottlenecks + +- **Connection pool exhaustion** — P99 spikes, timeouts appear. Increase pool + size in the repository's DB config. +- **Thread pool starvation** — Latency climbs across the board. Look for + sync-over-async calls in the repository. +- **Database lock contention** — Creates slow down while queries stay fast (or + vice versa). Check DB lock waits. +- **GC pressure** — Periodic latency spikes visible in the chart. Monitor server + GC metrics. + +--- + +## License + +Part of the [sensenet](https://github.com/SenseNet/sensenet) project. See the +repository root [LICENSE](../../LICENSE) for details. 
diff --git a/tools/SnBenchmark/ReportGenerator.cs b/tools/SnBenchmark/ReportGenerator.cs new file mode 100644 index 000000000..515e9134b --- /dev/null +++ b/tools/SnBenchmark/ReportGenerator.cs @@ -0,0 +1,265 @@ +using System.Globalization; +using System.Text; + +namespace SnBenchmark; + +/// +/// Generates a Markdown benchmark report with all metrics, time-series +/// data, configuration, and error summary. +/// +public static class ReportGenerator +{ + public static async Task GenerateAsync( + BenchmarkOptions opts, MetricsCollector metrics, int peakConcurrency) + { + var snap = metrics.GetSnapshot(peakConcurrency); + var slices = metrics.GetTimeSlices(5); + var allResults = metrics.GetAllResults(); + var timestamp = DateTime.Now; + var fileName = $"benchmark-report-{timestamp:yyyyMMdd-HHmmss}.md"; + + var dir = Path.GetFullPath(opts.ReportDirectory); + Directory.CreateDirectory(dir); + var filePath = Path.Combine(dir, fileName); + + var sb = new StringBuilder(); + + // ── Header + sb.AppendLine("# sensenet Benchmark Report"); + sb.AppendLine(); + sb.AppendLine($"**Generated:** {timestamp:yyyy-MM-dd HH:mm:ss} "); + sb.AppendLine($"**Repository:** `{opts.RepositoryUrl}` "); + sb.AppendLine($"**Duration:** {snap.Elapsed:hh\\:mm\\:ss} "); + sb.AppendLine(); + + // ── Configuration + sb.AppendLine("## ⚙ Configuration"); + sb.AppendLine(); + sb.AppendLine("| Parameter | Value |"); + sb.AppendLine("|-----------|-------|"); + sb.AppendLine($"| Base Path | `{opts.BasePath}` |"); + sb.AppendLine($"| Content Type | `{opts.ContentTypeName}` |"); + sb.AppendLine($"| Initial Concurrency | {opts.InitialConcurrency} |"); + sb.AppendLine($"| Max Concurrency | {opts.MaxConcurrency} |"); + sb.AppendLine($"| Ramp-Up Step | +{opts.RampUpConcurrencyStep} every {opts.RampUpStepSeconds}s |"); + sb.AppendLine($"| Test Duration | {(opts.TestDurationSeconds > 0 ? 
$"{opts.TestDurationSeconds}s" : "unlimited")} |"); + sb.AppendLine($"| Request Timeout | {opts.RequestTimeoutSeconds}s |"); + sb.AppendLine($"| Create/Query Mix | {opts.CreateWeight}% / {opts.QueryWeight}% |"); + sb.AppendLine(); + + // ── Summary + sb.AppendLine("## 📊 Summary"); + sb.AppendLine(); + sb.AppendLine("| Metric | Value |"); + sb.AppendLine("|--------|-------|"); + sb.AppendLine($"| Total Requests | {snap.TotalRequests:N0} |"); + sb.AppendLine($"| Successful | {snap.TotalSuccessful:N0} |"); + sb.AppendLine($"| Failed | {snap.TotalFailed:N0} |"); + sb.AppendLine($"| Success Rate | {(snap.TotalRequests > 0 ? (double)snap.TotalSuccessful / snap.TotalRequests * 100 : 0):F1}% |"); + sb.AppendLine($"| Peak Concurrency | {peakConcurrency} |"); + sb.AppendLine($"| Total Creates | {snap.TotalCreates:N0} |"); + sb.AppendLine($"| Total Queries | {snap.TotalQueries:N0} |"); + sb.AppendLine(); + + // ── Throughput + sb.AppendLine("## ⚡ Throughput"); + sb.AppendLine(); + sb.AppendLine("| Metric | Value |"); + sb.AppendLine("|--------|-------|"); + sb.AppendLine($"| Creates / minute | {snap.CreatesPerMinute:F1} |"); + sb.AppendLine($"| Queries / minute | {snap.QueriesPerMinute:F1} |"); + sb.AppendLine($"| Total req / minute | {(snap.TotalRequests / Math.Max(snap.Elapsed.TotalMinutes, 0.01)):F1} |"); + sb.AppendLine(); + + // ── Latency + sb.AppendLine("## 🕐 Latency (ms)"); + sb.AppendLine(); + sb.AppendLine("| Metric | Create | Query |"); + sb.AppendLine("|--------|--------|-------|"); + sb.AppendLine($"| Average | {snap.AvgCreateMs:F0} | {snap.AvgQueryMs:F0} |"); + sb.AppendLine($"| P95 | {snap.P95CreateMs:F0} | {snap.P95QueryMs:F0} |"); + sb.AppendLine($"| P99 | {snap.P99CreateMs:F0} | {snap.P99QueryMs:F0} |"); + sb.AppendLine($"| Max | {snap.MaxCreateMs:F0} | {snap.MaxQueryMs:F0} |"); + sb.AppendLine(); + + // ── Time series + if (slices.Count > 0) + { + sb.AppendLine("## 📈 Time Series (5-second intervals)"); + sb.AppendLine(); + sb.AppendLine("| Time (s) | 
Requests | Success | Failed | Avg Latency (ms) | Creates | Queries |"); + sb.AppendLine("|----------|----------|---------|--------|-------------------|---------|---------|"); + foreach (var s in slices) + { + sb.AppendLine(string.Format(CultureInfo.InvariantCulture, + "| {0:F0} | {1} | {2} | {3} | {4:F0} | {5} | {6} |", + s.OffsetSeconds, s.RequestCount, s.SuccessCount, + s.FailCount, s.AvgLatencyMs, s.CreateCount, s.QueryCount)); + } + sb.AppendLine(); + + // ASCII chart: throughput over time + sb.AppendLine("### Throughput Over Time"); + sb.AppendLine(); + sb.AppendLine("```"); + AppendAsciiChart(sb, slices); + sb.AppendLine("```"); + sb.AppendLine(); + + // ASCII chart: latency over time + sb.AppendLine("### Latency Over Time"); + sb.AppendLine(); + sb.AppendLine("```"); + AppendLatencyChart(sb, slices); + sb.AppendLine("```"); + sb.AppendLine(); + } + + // ── Latency distribution + sb.AppendLine("## 📊 Latency Distribution"); + sb.AppendLine(); + AppendLatencyHistogram(sb, allResults); + sb.AppendLine(); + + // ── Errors + if (snap.ErrorMessages.Count > 0) + { + sb.AppendLine("## ❌ Errors"); + sb.AppendLine(); + sb.AppendLine("| Error | Count |"); + sb.AppendLine("|-------|-------|"); + foreach (var (msg, count) in snap.ErrorMessages) + sb.AppendLine($"| {EscapeMd(msg)} | {count} |"); + sb.AppendLine(); + } + + // ── Footer + sb.AppendLine("---"); + sb.AppendLine($"*Report generated by SnBenchmark v1.0 at {timestamp:O}*"); + + await File.WriteAllTextAsync(filePath, sb.ToString()); + return filePath; + } + + // ── ASCII charts ──────────────────────────────────────────────── + + private static void AppendAsciiChart(StringBuilder sb, IReadOnlyList slices) + { + const int height = 15; + const int width = 60; + + var maxVal = slices.Max(s => s.RequestCount); + if (maxVal == 0) maxVal = 1; + + // Resample to width + var data = Resample(slices.Select(s => (double)s.RequestCount).ToArray(), width); + + sb.AppendLine($" Requests per 5s interval (max: {maxVal})"); + for 
(var row = height; row >= 1; row--) + { + var threshold = (double)row / height * maxVal; + sb.Append(row == height ? $"{maxVal,5} │" : row == 1 ? $"{"0",5} │" : " │"); + + foreach (var val in data) + { + sb.Append(val >= threshold ? '█' : ' '); + } + sb.AppendLine(); + } + sb.Append(" └"); + sb.AppendLine(new string('─', width)); + + var totalSecs = slices.Last().OffsetSeconds; + sb.AppendLine($" 0s{new string(' ', width / 2 - 5)}{totalSecs / 2:F0}s{new string(' ', width / 2 - 5)}{totalSecs:F0}s"); + } + + private static void AppendLatencyChart(StringBuilder sb, IReadOnlyList slices) + { + const int height = 12; + const int width = 60; + + var maxVal = slices.Max(s => s.AvgLatencyMs); + if (maxVal < 1) maxVal = 1; + + var data = Resample(slices.Select(s => s.AvgLatencyMs).ToArray(), width); + + sb.AppendLine($" Avg latency ms (max: {maxVal:F0})"); + for (var row = height; row >= 1; row--) + { + var threshold = (double)row / height * maxVal; + sb.Append(row == height ? $"{maxVal,7:F0} │" : row == 1 ? $"{"0",7} │" : " │"); + + foreach (var val in data) + { + sb.Append(val >= threshold ? 
'▓' : ' '); + } + sb.AppendLine(); + } + sb.Append(" └"); + sb.AppendLine(new string('─', width)); + } + + private static void AppendLatencyHistogram(StringBuilder sb, IReadOnlyList results) + { + var buckets = new (string Label, double Min, double Max)[] + { + ("< 50ms", 0, 50), + ("50-100ms", 50, 100), + ("100-200ms", 100, 200), + ("200-500ms", 200, 500), + ("500ms-1s", 500, 1000), + ("1-2s", 1000, 2000), + ("2-5s", 2000, 5000), + ("5-10s", 5000, 10000), + ("> 10s", 10000, double.MaxValue), + }; + + sb.AppendLine("```"); + var maxCount = 0; + var bucketCounts = buckets.Select(b => + { + var count = results.Count(r => r.ElapsedMs >= b.Min && r.ElapsedMs < b.Max); + if (count > maxCount) maxCount = count; + return (b.Label, Count: count); + }).ToArray(); + + if (maxCount == 0) maxCount = 1; + const int barWidth = 40; + + foreach (var (label, count) in bucketCounts) + { + var width = (int)((double)count / maxCount * barWidth); + var bar = new string('█', width) + new string('░', barWidth - width); + sb.AppendLine($" {label,10} │{bar}│ {count:N0}"); + } + sb.AppendLine("```"); + } + + // ── Helpers ────────────────────────────────────────────────────── + + private static double[] Resample(double[] data, int targetLen) + { + if (data.Length == 0) return new double[targetLen]; + if (data.Length <= targetLen) return data; + + var result = new double[targetLen]; + var ratio = (double)data.Length / targetLen; + + for (var i = 0; i < targetLen; i++) + { + var start = (int)(i * ratio); + var end = (int)((i + 1) * ratio); + if (end > data.Length) end = data.Length; + if (start >= end) { result[i] = data[start]; continue; } + + result[i] = 0; + for (var j = start; j < end; j++) + result[i] += data[j]; + result[i] /= (end - start); + } + return result; + } + + private static string EscapeMd(string text) => + text.Replace("|", "\\|").Replace("\n", " "); +} diff --git a/tools/SnBenchmark/SenseNetClient.cs b/tools/SnBenchmark/SenseNetClient.cs new file mode 100644 index 
000000000..1ba020da3 --- /dev/null +++ b/tools/SnBenchmark/SenseNetClient.cs @@ -0,0 +1,233 @@ +using System.Diagnostics; +using System.Net; +using System.Net.Http.Headers; +using System.Text; +using System.Text.Json; + +namespace SnBenchmark; + +/// +/// Lightweight HTTP client wrapping sensenet OData REST API calls. +/// Thread-safe – a single instance is shared across all workers. +/// +public sealed class SenseNetClient : IDisposable +{ + private readonly HttpClient _http; + private readonly string _baseUrl; + private readonly string _basePath; + private readonly string _contentType; + private static readonly JsonSerializerOptions JsonOpts = new() + { + PropertyNameCaseInsensitive = true, + WriteIndented = false + }; + + public SenseNetClient(BenchmarkOptions opts) + { + _baseUrl = opts.RepositoryUrl.TrimEnd('/'); + _basePath = opts.BasePath; + _contentType = opts.ContentTypeName; + + var handler = new HttpClientHandler(); + if (opts.SkipTlsValidation) + { + handler.ServerCertificateCustomValidationCallback = + HttpClientHandler.DangerousAcceptAnyServerCertificateValidator; + } + + _http = new HttpClient(handler) + { + BaseAddress = new Uri(_baseUrl), + Timeout = TimeSpan.FromSeconds(opts.RequestTimeoutSeconds), + }; + _http.DefaultRequestHeaders.Add("apikey", opts.ApiKey); + _http.DefaultRequestHeaders.Accept.Add( + new MediaTypeWithQualityHeaderValue("application/json")); + } + + // ── Ensure base folder ────────────────────────────────────────── + + /// + /// Ensures the /Root/Content/Benchmark folder exists. + /// Creates it recursively if needed. + /// + public async Task EnsureBaseFolderAsync(CancellationToken ct) + { + var segments = _basePath.Split('/', StringSplitOptions.RemoveEmptyEntries); + var current = string.Empty; + + for (var i = 0; i < segments.Length; i++) + { + var parent = current == string.Empty ? 
"/" + segments[0] : current; + current = "/" + string.Join("/", segments.Take(i + 1)); + + if (i < 1) continue; // skip /Root – always exists + + // Check if exists + var checkUrl = $"/OData.svc{current}?metadata=no&$select=Id"; + var resp = await _http.GetAsync(checkUrl, ct); + if (resp.StatusCode == HttpStatusCode.OK) continue; + + // Create folder under parent + var parentOData = $"/OData.svc{parent}"; + var body = new + { + __ContentType = "Folder", + Name = segments[i], + DisplayName = segments[i] + }; + var json = JsonSerializer.Serialize(body, JsonOpts); + var content = new StringContent(json, Encoding.UTF8, "application/json"); + var createResp = await _http.PostAsync(parentOData, content, ct); + createResp.EnsureSuccessStatusCode(); + } + } + + // ── Create content ────────────────────────────────────────────── + + /// + /// Creates a content node under the benchmark folder. + /// Returns (success, elapsedMs). + /// + public async Task CreateContentAsync(string name, int workerIndex, + CancellationToken ct) + { + var sw = Stopwatch.StartNew(); + try + { + var url = $"/OData.svc{_basePath}"; + var body = new + { + __ContentType = _contentType, + Name = name, + DisplayName = $"Bench-{workerIndex}-{name}", + Description = $"Benchmark content created at {DateTime.UtcNow:O} by worker {workerIndex}" + }; + var json = JsonSerializer.Serialize(body, JsonOpts); + var content = new StringContent(json, Encoding.UTF8, "application/json"); + var resp = await _http.PostAsync(url, content, ct); + sw.Stop(); + + return new RequestResult + { + Operation = OperationType.Create, + Success = resp.IsSuccessStatusCode, + StatusCode = (int)resp.StatusCode, + ElapsedMs = sw.Elapsed.TotalMilliseconds, + Timestamp = DateTime.UtcNow, + WorkerIndex = workerIndex, + ErrorMessage = resp.IsSuccessStatusCode + ? 
null + : await ReadErrorAsync(resp) + }; + } + catch (Exception ex) + { + sw.Stop(); + return new RequestResult + { + Operation = OperationType.Create, + Success = false, + StatusCode = 0, + ElapsedMs = sw.Elapsed.TotalMilliseconds, + Timestamp = DateTime.UtcNow, + WorkerIndex = workerIndex, + ErrorMessage = ex.Message + }; + } + } + + // ── Query content ─────────────────────────────────────────────── + + /// + /// Queries content under the benchmark folder with OData. + /// Returns (success, elapsedMs). + /// + public async Task QueryContentAsync(int workerIndex, CancellationToken ct) + { + var sw = Stopwatch.StartNew(); + try + { + var skip = Random.Shared.Next(0, 50); + var url = $"/OData.svc{_basePath}" + + $"?metadata=no&$orderby=CreationDate desc&$top=10&$skip={skip}" + + $"&$select=Id,Name,DisplayName,CreationDate"; + var resp = await _http.GetAsync(url, ct); + sw.Stop(); + + return new RequestResult + { + Operation = OperationType.Query, + Success = resp.IsSuccessStatusCode, + StatusCode = (int)resp.StatusCode, + ElapsedMs = sw.Elapsed.TotalMilliseconds, + Timestamp = DateTime.UtcNow, + WorkerIndex = workerIndex, + ErrorMessage = resp.IsSuccessStatusCode + ? 
null + : await ReadErrorAsync(resp) + }; + } + catch (Exception ex) + { + sw.Stop(); + return new RequestResult + { + Operation = OperationType.Query, + Success = false, + StatusCode = 0, + ElapsedMs = sw.Elapsed.TotalMilliseconds, + Timestamp = DateTime.UtcNow, + WorkerIndex = workerIndex, + ErrorMessage = ex.Message + }; + } + } + + // ── Health check ──────────────────────────────────────────────── + + public async Task IsAliveAsync(CancellationToken ct) + { + try + { + var resp = await _http.GetAsync("/OData.svc/Root?metadata=no&$select=Id", ct); + return resp.IsSuccessStatusCode; + } + catch + { + return false; + } + } + + // ── Helpers ───────────────────────────────────────────────────── + + private static async Task ReadErrorAsync(HttpResponseMessage resp) + { + try + { + var body = await resp.Content.ReadAsStringAsync(); + return body.Length > 300 ? body[..300] + "…" : body; + } + catch + { + return $"HTTP {(int)resp.StatusCode}"; + } + } + + public void Dispose() => _http.Dispose(); +} + +// ── DTOs ──────────────────────────────────────────────────────────── + +public enum OperationType { Create, Query } + +public sealed class RequestResult +{ + public OperationType Operation { get; init; } + public bool Success { get; init; } + public int StatusCode { get; init; } + public double ElapsedMs { get; init; } + public DateTime Timestamp { get; init; } + public int WorkerIndex { get; init; } + public string? 
ErrorMessage { get; init; } +} diff --git a/tools/SnBenchmark/SnBenchmark.csproj b/tools/SnBenchmark/SnBenchmark.csproj new file mode 100644 index 000000000..1c92fcaa1 --- /dev/null +++ b/tools/SnBenchmark/SnBenchmark.csproj @@ -0,0 +1,27 @@ + + + + Exe + net10.0 + enable + enable + SnBenchmark + SnBenchmark + + + + + + + + + + + + + + PreserveNewest + + + + diff --git a/tools/SnBenchmark/appsettings.json b/tools/SnBenchmark/appsettings.json new file mode 100644 index 000000000..dad25a0a4 --- /dev/null +++ b/tools/SnBenchmark/appsettings.json @@ -0,0 +1,23 @@ +{ + "Benchmark": { + "RepositoryUrl": "https://localhost:44362", + "ApiKey": "e1EWRg2JrS9h3tdwweYW6C4jqhhp2xWXAmzjLInzCU2jUudPqtLxIy0u8WVd2wwvIBB4er4OdKOcvmUEi9d", + + "BasePath": "/Root/Content/Benchmark", + "ContentTypeName": "File", + + "InitialConcurrency": 2, + "MaxConcurrency": 128, + "RampUpStepSeconds": 10, + "RampUpConcurrencyStep": 2, + + "TestDurationSeconds": 12000, + "RequestTimeoutSeconds": 30, + + "CreateWeight": 70, + "QueryWeight": 30, + + "ReportDirectory": "./reports", + "SkipTlsValidation": true + } +} diff --git a/tools/SnDbMigrator/Checkpoint.cs b/tools/SnDbMigrator/Checkpoint.cs new file mode 100644 index 000000000..7fbdeb7a9 --- /dev/null +++ b/tools/SnDbMigrator/Checkpoint.cs @@ -0,0 +1,99 @@ +using System.Text.Json; + +namespace SnDbMigrator; + +/// +/// Tracks migration progress so that a failed migration can be resumed +/// from the last completed table instead of starting over. +/// +public sealed class Checkpoint +{ + private string _filePath; + + public Dictionary Tables { get; set; } = new(); + public DateTime StartedAt { get; set; } + public DateTime? CompletedAt { get; set; } + + public Checkpoint() : this("") { } + + public Checkpoint(string filePath) + { + _filePath = filePath; + } + + /// Load checkpoint from disk, or create a new one. 
+ public static Checkpoint Load(string filePath) + { + if (File.Exists(filePath)) + { + var json = File.ReadAllText(filePath); + var cp = JsonSerializer.Deserialize(json, + new JsonSerializerOptions { PropertyNameCaseInsensitive = true })!; + cp._filePath = filePath; + return cp; + } + + return new Checkpoint(filePath) { StartedAt = DateTime.UtcNow }; + } + + public bool IsTableCompleted(string tableName) + => Tables.TryGetValue(tableName, out var tc) && tc.Completed; + + public long GetLastId(string tableName) + => Tables.TryGetValue(tableName, out var tc) ? tc.LastId : 0; + + public void MarkTableProgress(string tableName, long lastId, long rowsMigrated) + { + if (!Tables.TryGetValue(tableName, out var tc)) + { + tc = new TableCheckpoint(); + Tables[tableName] = tc; + } + tc.LastId = lastId; + tc.RowsMigrated = rowsMigrated; + Save(); + } + + public void MarkTableCompleted(string tableName, long totalRows) + { + if (!Tables.TryGetValue(tableName, out var tc)) + { + tc = new TableCheckpoint(); + Tables[tableName] = tc; + } + tc.Completed = true; + tc.RowsMigrated = totalRows; + tc.CompletedAt = DateTime.UtcNow; + Save(); + } + + public void MarkMigrationCompleted() + { + CompletedAt = DateTime.UtcNow; + Save(); + } + + public void Delete() + { + if (File.Exists(_filePath)) + File.Delete(_filePath); + } + + private void Save() + { + var json = JsonSerializer.Serialize(this, new JsonSerializerOptions + { + WriteIndented = true, + PropertyNamingPolicy = JsonNamingPolicy.CamelCase + }); + File.WriteAllText(_filePath, json); + } +} + +public sealed class TableCheckpoint +{ + public bool Completed { get; set; } + public long LastId { get; set; } + public long RowsMigrated { get; set; } + public DateTime? 
CompletedAt { get; set; } +} diff --git a/tools/SnDbMigrator/ForeignKeyManager.cs b/tools/SnDbMigrator/ForeignKeyManager.cs new file mode 100644 index 000000000..db0892110 --- /dev/null +++ b/tools/SnDbMigrator/ForeignKeyManager.cs @@ -0,0 +1,93 @@ +using Npgsql; + +namespace SnDbMigrator; + +/// +/// Manages PostgreSQL foreign key constraints during migration. +/// Disables FKs before data load and re-enables them after. +/// +public sealed class ForeignKeyManager +{ + private readonly string _connectionString; + + public ForeignKeyManager(string connectionString) + { + _connectionString = connectionString; + } + + /// Disables FK enforcement by disabling all triggers on each table (PostgreSQL enforces FKs via internal triggers). + public async Task DisableForeignKeysAsync(CancellationToken ct) + { + await using var conn = new NpgsqlConnection(_connectionString); + await conn.OpenAsync(ct); + + // Disable all triggers (which includes FK enforcement) + foreach (var table in SenseNetSchema.Tables) + { + try + { + await using var cmd = new NpgsqlCommand( + $"ALTER TABLE \"{table.Name}\" DISABLE TRIGGER ALL", conn); + cmd.CommandTimeout = 30; + await cmd.ExecuteNonQueryAsync(ct); + } + catch (PostgresException ex) when (ex.SqlState == "42P01") // table does not exist + { + // Skip optional tables that don't exist + } + } + } + + /// Re-enables all FK constraints and validates them. 
+ public async Task EnableForeignKeysAsync(CancellationToken ct) + { + await using var conn = new NpgsqlConnection(_connectionString); + await conn.OpenAsync(ct); + + // Re-enable triggers + foreach (var table in SenseNetSchema.Tables) + { + try + { + await using var cmd = new NpgsqlCommand( + $"ALTER TABLE \"{table.Name}\" ENABLE TRIGGER ALL", conn); + cmd.CommandTimeout = 30; + await cmd.ExecuteNonQueryAsync(ct); + } + catch (PostgresException ex) when (ex.SqlState == "42P01") + { + // Skip optional tables + } + } + + // Validate all FK constraints + foreach (var fk in SenseNetSchema.ForeignKeys) + { + try + { + await using var cmd = new NpgsqlCommand( + $"ALTER TABLE \"{fk.Table}\" VALIDATE CONSTRAINT \"{fk.ConstraintName}\"", conn); + cmd.CommandTimeout = 300; + await cmd.ExecuteNonQueryAsync(ct); + } + catch (PostgresException ex) when ( + ex.SqlState == "42P01" || // table does not exist + ex.SqlState == "42704") // constraint does not exist + { + // Skip missing constraints + } + } + } + + /// Allow explicit identity values to be inserted (override SERIAL). + public async Task EnableIdentityInsertAsync(CancellationToken ct) + { + await using var conn = new NpgsqlConnection(_connectionString); + await conn.OpenAsync(ct); + + // For SERIAL columns, PostgreSQL doesn't need SET IDENTITY_INSERT. + // We just need to make sure the column is writable, which it always is + // for SERIAL (it's just a DEFAULT from a sequence, not a constraint). + // Nothing needed here for PostgreSQL SERIAL columns. + } +} diff --git a/tools/SnDbMigrator/MigrationEngine.cs b/tools/SnDbMigrator/MigrationEngine.cs new file mode 100644 index 000000000..02a5801dd --- /dev/null +++ b/tools/SnDbMigrator/MigrationEngine.cs @@ -0,0 +1,305 @@ +using System.Diagnostics; +using Microsoft.Data.SqlClient; +using Npgsql; +using Spectre.Console; + +namespace SnDbMigrator; + +/// +/// Orchestrates the full migration pipeline: +/// 1. Pre-flight connectivity checks +/// 2. 
Disable FK constraints & triggers +/// 3. Truncate target tables (if configured) +/// 4. Migrate each table in dependency order +/// 5. Fix SERIAL sequences +/// 6. Re-enable FK constraints & triggers +/// 7. Verify row counts +/// +public sealed class MigrationEngine +{ + private readonly MigrationOptions _options; + private readonly Checkpoint _checkpoint; + private readonly TableMigrator _tableMigrator; + private readonly SequenceFixer _sequenceFixer; + private readonly ForeignKeyManager _fkManager; + + public MigrationEngine(MigrationOptions options) + { + _options = options; + _checkpoint = Checkpoint.Load(options.CheckpointFile); + _tableMigrator = new TableMigrator(options, _checkpoint, OnTableProgress); + _sequenceFixer = new SequenceFixer(options.Target.ConnectionString); + _fkManager = new ForeignKeyManager(options.Target.ConnectionString); + } + + /// Current table being migrated (for UI). + public string CurrentTable { get; private set; } = ""; + public long CurrentTableRows { get; private set; } + public long CurrentTableTotal { get; private set; } + public Dictionary CompletedTables { get; } = new(); + + public event Action? ProgressChanged; + + private void OnTableProgress(string table, long done, long total) + { + CurrentTable = table; + CurrentTableRows = done; + CurrentTableTotal = total; + ProgressChanged?.Invoke(); + } + + /// + /// Runs the full migration. + /// + public async Task RunAsync(CancellationToken ct) + { + var sw = Stopwatch.StartNew(); + var result = new MigrationResult(); + + // ── 1. 
Pre-flight checks ─────────────────────────────── + AnsiConsole.MarkupLine("[blue]▸[/] Testing source (MSSQL) connection..."); + if (!await TestSourceConnection(ct)) + { + result.Success = false; + result.Error = "Cannot connect to source MSSQL database."; + return result; + } + AnsiConsole.MarkupLine("[green] ✓[/] Source connected."); + + AnsiConsole.MarkupLine("[blue]▸[/] Testing target (PostgreSQL) connection..."); + if (!await TestTargetConnection(ct)) + { + result.Success = false; + result.Error = "Cannot connect to target PostgreSQL database."; + return result; + } + AnsiConsole.MarkupLine("[green] ✓[/] Target connected."); + + // ── 2. Determine tables to migrate ───────────────────── + var tablesToMigrate = new List(); + foreach (var table in SenseNetSchema.Tables) + { + if (_options.SkipTables.Contains(table.Name, StringComparer.OrdinalIgnoreCase)) + { + AnsiConsole.MarkupLine($"[yellow] ⊘[/] Skipping [bold]{table.Name}[/] (configured)"); + continue; + } + + var srcExists = await _tableMigrator.SourceTableExistsAsync(table.Name, ct); + if (!srcExists) + { + if (table.Optional) + { + AnsiConsole.MarkupLine($"[dim] ○[/] {table.Name} (not in source, optional)"); + continue; + } + AnsiConsole.MarkupLine($"[yellow] ⚠[/] {table.Name} not found in source!"); + continue; + } + + var tgtExists = await _tableMigrator.TargetTableExistsAsync(table.Name, ct); + if (!tgtExists) + { + AnsiConsole.MarkupLine($"[yellow] ⚠[/] {table.Name} not found in target!"); + continue; + } + + tablesToMigrate.Add(table); + } + + AnsiConsole.MarkupLine($"\n[blue]▸[/] {tablesToMigrate.Count} tables to migrate.\n"); + + // ── 3. Disable FK constraints ────────────────────────── + if (_options.DisableForeignKeys) + { + AnsiConsole.MarkupLine("[blue]▸[/] Disabling foreign key constraints..."); + await _fkManager.DisableForeignKeysAsync(ct); + AnsiConsole.MarkupLine("[green] ✓[/] FK constraints disabled."); + } + + // ── 4. 
Truncate target tables (reverse order) ────────── + if (_options.TruncateTarget && _checkpoint.Tables.Count == 0) + { + AnsiConsole.MarkupLine("[blue]▸[/] Truncating target tables..."); + foreach (var table in tablesToMigrate.AsEnumerable().Reverse()) + { + try + { + await _tableMigrator.TruncateTableAsync(table.Name, ct); + } + catch (Exception ex) + { + AnsiConsole.MarkupLine($"[yellow] ⚠[/] Truncate {table.Name}: {ex.Message}"); + } + } + AnsiConsole.MarkupLine("[green] ✓[/] Target tables truncated."); + } + + // ── 5. Migrate tables ────────────────────────────────── + AnsiConsole.WriteLine(); + + await AnsiConsole.Progress() + .AutoClear(false) + .HideCompleted(false) + .Columns( + new TaskDescriptionColumn(), + new ProgressBarColumn(), + new PercentageColumn(), + new SpinnerColumn(), + new RemainingTimeColumn()) + .StartAsync(async ctx => + { + foreach (var table in tablesToMigrate) + { + ct.ThrowIfCancellationRequested(); + + if (_checkpoint.IsTableCompleted(table.Name)) + { + var tc = _checkpoint.Tables[table.Name]; + var task = ctx.AddTask($"[green]✓[/] {table.Name}", maxValue: tc.RowsMigrated); + task.Value = tc.RowsMigrated; + CompletedTables[table.Name] = (tc.RowsMigrated, TimeSpan.Zero); + continue; + } + + var progressTask = ctx.AddTask($" {table.Name}", maxValue: 100); + var tableSw = Stopwatch.StartNew(); + + // Wire up progress + void UpdateProgress() + { + if (CurrentTable == table.Name && CurrentTableTotal > 0) + { + progressTask.MaxValue = CurrentTableTotal; + progressTask.Value = CurrentTableRows; + } + } + ProgressChanged += UpdateProgress; + + try + { + var rows = await _tableMigrator.MigrateTableAsync(table, ct); + tableSw.Stop(); + CompletedTables[table.Name] = (rows, tableSw.Elapsed); + result.TotalRows += rows; + + progressTask.Description = $"[green]✓[/] {table.Name}"; + progressTask.MaxValue = Math.Max(rows, 1); + progressTask.Value = Math.Max(rows, 1); + } + catch (Exception ex) + { + progressTask.Description = $"[red]✗[/] 
{table.Name}: {ex.Message}"; + result.Errors.Add($"{table.Name}: {ex.Message}"); + } + finally + { + ProgressChanged -= UpdateProgress; + } + } + }); + + // ── 6. Fix sequences ─────────────────────────────────── + if (_options.FixSequences) + { + AnsiConsole.MarkupLine("\n[blue]▸[/] Fixing SERIAL sequences..."); + var seqResults = await _sequenceFixer.FixAllSequencesAsync(tablesToMigrate, ct); + foreach (var (table, maxId) in seqResults.Where(x => x.Value > 0)) + { + AnsiConsole.MarkupLine($"[dim] {table}: sequence → {maxId}[/]"); + } + AnsiConsole.MarkupLine("[green] ✓[/] Sequences fixed."); + } + + // ── 7. Re-enable FK constraints ──────────────────────── + if (_options.DisableForeignKeys) + { + AnsiConsole.MarkupLine("[blue]▸[/] Re-enabling foreign key constraints..."); + await _fkManager.EnableForeignKeysAsync(ct); + AnsiConsole.MarkupLine("[green] ✓[/] FK constraints enabled and validated."); + } + + // ── 8. Verify row counts ─────────────────────────────── + if (_options.Verify) + { + AnsiConsole.MarkupLine("\n[blue]▸[/] Verifying row counts..."); + var verifyTable = new Table() + .Border(TableBorder.Rounded) + .AddColumn("Table") + .AddColumn("Source", c => c.RightAligned()) + .AddColumn("Target", c => c.RightAligned()) + .AddColumn("Status"); + + bool allMatch = true; + foreach (var table in tablesToMigrate) + { + var srcCount = await GetSourceRowCount(table.Name, ct); + var tgtCount = await _tableMigrator.GetTargetRowCount(table.Name, ct); + var match = srcCount == tgtCount; + if (!match) allMatch = false; + + verifyTable.AddRow( + table.Name, + srcCount.ToString("N0"), + tgtCount.ToString("N0"), + match ? 
"[green]✓[/]" : "[red]✗ MISMATCH[/]"); + } + AnsiConsole.Write(verifyTable); + result.Verified = allMatch; + } + + // ── Done ─────────────────────────────────────────────── + sw.Stop(); + result.Success = result.Errors.Count == 0; + result.Elapsed = sw.Elapsed; + result.TablesProcessed = CompletedTables.Count; + + _checkpoint.MarkMigrationCompleted(); + if (result.Success) + _checkpoint.Delete(); // Clean up checkpoint on success + + return result; + } + + private async Task TestSourceConnection(CancellationToken ct) + { + try + { + await using var conn = new SqlConnection(_options.Source.ConnectionString); + await conn.OpenAsync(ct); + return true; + } + catch { return false; } + } + + private async Task TestTargetConnection(CancellationToken ct) + { + try + { + await using var conn = new NpgsqlConnection(_options.Target.ConnectionString); + await conn.OpenAsync(ct); + return true; + } + catch { return false; } + } + + private async Task GetSourceRowCount(string tableName, CancellationToken ct) + { + await using var conn = new SqlConnection(_options.Source.ConnectionString); + await conn.OpenAsync(ct); + await using var cmd = new SqlCommand($"SELECT COUNT_BIG(*) FROM [{tableName}]", conn); + cmd.CommandTimeout = 120; + return (long)(await cmd.ExecuteScalarAsync(ct))!; + } +} + +public sealed class MigrationResult +{ + public bool Success { get; set; } + public string? Error { get; set; } + public TimeSpan Elapsed { get; set; } + public long TotalRows { get; set; } + public int TablesProcessed { get; set; } + public bool Verified { get; set; } + public List Errors { get; } = []; +} diff --git a/tools/SnDbMigrator/MigrationOptions.cs b/tools/SnDbMigrator/MigrationOptions.cs new file mode 100644 index 000000000..11b0a9a82 --- /dev/null +++ b/tools/SnDbMigrator/MigrationOptions.cs @@ -0,0 +1,43 @@ +namespace SnDbMigrator; + +/// +/// Migration configuration bound from appsettings.json "Migration" section. 
+/// +public sealed class MigrationOptions +{ + public DbEndpoint Source { get; set; } = new(); + public DbEndpoint Target { get; set; } = new(); + + /// Rows per batch for non-blob tables. + public int BatchSize { get; set; } = 5000; + + /// Rows per batch for blob tables (Files, EFMessages). + public int BlobBatchSize { get; set; } = 50; + + /// Path to checkpoint file for resume support. + public string CheckpointFile { get; set; } = "./migration-checkpoint.json"; + + /// Tables to skip (e.g. LogEntries, StatisticalData). + public string[] SkipTables { get; set; } = []; + + /// Truncate target tables before migration. + public bool TruncateTarget { get; set; } = true; + + /// Disable FK constraints during migration for performance. + public bool DisableForeignKeys { get; set; } = true; + + /// Fix PostgreSQL sequences after migration. + public bool FixSequences { get; set; } = true; + + /// Verify row counts after migration. + public bool Verify { get; set; } = true; +} + +public sealed class DbEndpoint +{ + public string Provider { get; set; } = ""; + public string ConnectionString { get; set; } = ""; + + public bool IsMsSql => Provider.Equals("MsSql", StringComparison.OrdinalIgnoreCase); + public bool IsPostgreSql => Provider.Equals("PostgreSql", StringComparison.OrdinalIgnoreCase); +} diff --git a/tools/SnDbMigrator/Program.cs b/tools/SnDbMigrator/Program.cs new file mode 100644 index 000000000..121d82c41 --- /dev/null +++ b/tools/SnDbMigrator/Program.cs @@ -0,0 +1,196 @@ +using Microsoft.Extensions.Configuration; +using Spectre.Console; + +namespace SnDbMigrator; + +public static class Program +{ + public static async Task Main(string[] args) + { + // ── Banner ───────────────────────────────────────────── + AnsiConsole.Write(new FigletText("SnDbMigrator") + .Color(Color.CornflowerBlue)); + AnsiConsole.MarkupLine("[dim]sensenet MSSQL → PostgreSQL database migrator[/]\n"); + + // ── Load configuration ───────────────────────────────── + var config = new 
ConfigurationBuilder() + .SetBasePath(Directory.GetCurrentDirectory()) + .AddJsonFile("appsettings.json", optional: false) + .AddJsonFile("appsettings.local.json", optional: true) + .AddEnvironmentVariables("SNMIGRATE_") + .AddCommandLine(args) + .Build(); + + var options = new MigrationOptions(); + config.GetSection("Migration").Bind(options); + + // ── Validate ─────────────────────────────────────────── + if (string.IsNullOrWhiteSpace(options.Source?.ConnectionString)) + { + AnsiConsole.MarkupLine("[red]Error:[/] Source connection string is not configured."); + return 1; + } + if (string.IsNullOrWhiteSpace(options.Target?.ConnectionString)) + { + AnsiConsole.MarkupLine("[red]Error:[/] Target connection string is not configured."); + return 1; + } + if (!options.Source.IsMsSql) + { + AnsiConsole.MarkupLine("[red]Error:[/] Source must be MsSql provider."); + return 1; + } + if (!options.Target.IsPostgreSql) + { + AnsiConsole.MarkupLine("[red]Error:[/] Target must be PostgreSql provider."); + return 1; + } + + // ── Display config ───────────────────────────────────── + var configTable = new Table() + .Border(TableBorder.Rounded) + .Title("[bold]Migration Configuration[/]") + .AddColumn("Setting") + .AddColumn("Value"); + + configTable.AddRow("Source", MaskConnectionString(options.Source.ConnectionString)); + configTable.AddRow("Target", MaskConnectionString(options.Target.ConnectionString)); + configTable.AddRow("Batch Size", options.BatchSize.ToString("N0")); + configTable.AddRow("Blob Batch Size", options.BlobBatchSize.ToString("N0")); + configTable.AddRow("Truncate Target", options.TruncateTarget ? "[yellow]Yes[/]" : "No"); + configTable.AddRow("Disable FK", options.DisableForeignKeys ? "Yes" : "No"); + configTable.AddRow("Fix Sequences", options.FixSequences ? "Yes" : "No"); + configTable.AddRow("Verify", options.Verify ? 
"Yes" : "No"); + configTable.AddRow("Checkpoint File", options.CheckpointFile); + + if (options.SkipTables.Length > 0) + configTable.AddRow("Skip Tables", string.Join(", ", options.SkipTables)); + + AnsiConsole.Write(configTable); + AnsiConsole.WriteLine(); + + // ── Check for existing checkpoint ────────────────────── + if (File.Exists(options.CheckpointFile)) + { + var resume = AnsiConsole.Confirm( + "[yellow]A checkpoint file exists. Resume previous migration?[/]", true); + if (!resume) + { + File.Delete(options.CheckpointFile); + AnsiConsole.MarkupLine("[dim]Checkpoint cleared.[/]"); + } + else + { + AnsiConsole.MarkupLine("[green]Resuming from checkpoint...[/]"); + } + } + + // ── Confirm destructive operation ────────────────────── + if (options.TruncateTarget && !File.Exists(options.CheckpointFile)) + { + var proceed = AnsiConsole.Confirm( + "[red]⚠ This will TRUNCATE all target tables. Continue?[/]", false); + if (!proceed) + { + AnsiConsole.MarkupLine("[dim]Aborted.[/]"); + return 0; + } + } + + // ── Run migration ────────────────────────────────────── + using var cts = new CancellationTokenSource(); + Console.CancelKeyPress += (_, e) => + { + e.Cancel = true; + cts.Cancel(); + AnsiConsole.MarkupLine("\n[yellow]Cancellation requested. Finishing current batch...[/]"); + }; + + var engine = new MigrationEngine(options); + MigrationResult result; + + try + { + result = await engine.RunAsync(cts.Token); + } + catch (OperationCanceledException) + { + AnsiConsole.MarkupLine("\n[yellow]Migration cancelled. Progress saved to checkpoint.[/]"); + return 2; + } + catch (Exception ex) + { + AnsiConsole.WriteException(ex); + return 1; + } + + // ── Summary ──────────────────────────────────────────── + AnsiConsole.WriteLine(); + var summaryPanel = new Panel( + new Rows( + new Markup($"[bold]Status:[/] {(result.Success ? 
"[green]SUCCESS[/]" : "[red]FAILED[/]")}"), + new Markup($"[bold]Duration:[/] {result.Elapsed:hh\\:mm\\:ss\\.fff}"), + new Markup($"[bold]Tables:[/] {result.TablesProcessed}"), + new Markup($"[bold]Total Rows:[/] {result.TotalRows:N0}"), + new Markup($"[bold]Throughput:[/] {(result.Elapsed.TotalSeconds > 0 ? (result.TotalRows / result.Elapsed.TotalSeconds).ToString("N0") : "—")} rows/sec"), + result.Verified + ? new Markup("[bold]Verified:[/] [green]All row counts match ✓[/]") + : result.Success + ? new Markup("[bold]Verified:[/] [dim]Not requested[/]") + : new Markup("[bold]Verified:[/] [red]Row count mismatches detected[/]") + )) + .Header("[bold]Migration Summary[/]") + .Border(BoxBorder.Double) + .Padding(1, 0); + + AnsiConsole.Write(summaryPanel); + + if (result.Errors.Count > 0) + { + AnsiConsole.MarkupLine("\n[red]Errors:[/]"); + foreach (var err in result.Errors) + AnsiConsole.MarkupLine($" [red]•[/] {err}"); + } + + // ── Per-table breakdown ──────────────────────────────── + if (engine.CompletedTables.Count > 0) + { + AnsiConsole.WriteLine(); + var breakdown = new Table() + .Border(TableBorder.Rounded) + .Title("[bold]Per-Table Breakdown[/]") + .AddColumn("Table") + .AddColumn("Rows", c => c.RightAligned()) + .AddColumn("Time") + .AddColumn("Rate"); + + foreach (var (table, (rows, elapsed)) in engine.CompletedTables.OrderBy(x => x.Key)) + { + var rate = elapsed.TotalSeconds > 0 + ? $"{rows / elapsed.TotalSeconds:N0} rows/s" + : "—"; + breakdown.AddRow( + table, + rows.ToString("N0"), + elapsed == TimeSpan.Zero ? "[dim]cached[/]" : elapsed.ToString(@"mm\:ss\.fff"), + elapsed == TimeSpan.Zero ? "[dim]—[/]" : rate); + } + AnsiConsole.Write(breakdown); + } + + return result.Success ? 
0 : 1; + } + + private static string MaskConnectionString(string cs) + { + // Mask password in connection string for display + var parts = cs.Split(';'); + for (int i = 0; i < parts.Length; i++) + { + var kv = parts[i].Split('=', 2); + if (kv.Length == 2 && kv[0].Trim().Equals("Password", StringComparison.OrdinalIgnoreCase)) + parts[i] = $"{kv[0]}=****"; + } + return string.Join(";", parts); + } +} diff --git a/tools/SnDbMigrator/README.md b/tools/SnDbMigrator/README.md new file mode 100644 index 000000000..44fd8d3e1 --- /dev/null +++ b/tools/SnDbMigrator/README.md @@ -0,0 +1,164 @@ +# SnDbMigrator + +A production-grade **MSSQL → PostgreSQL** database migrator for sensenet CMS. + +Designed for large databases (300 GB+) with streaming blob support, checkpoint/resume, and a live progress dashboard. + +## Features + +| Feature | Description | +|---|---| +| **Streaming blobs** | `Files.Stream` and `EFMessages.Body` are read/written without loading entire BLOBs into memory | +| **COPY BINARY** | Non-blob tables use PostgreSQL `COPY BINARY` protocol for maximum throughput | +| **Checkpoint / resume** | Progress is saved after each table — a failed migration can be resumed | +| **FK handling** | Foreign key constraints and triggers are disabled during migration and re-enabled after | +| **Sequence fix** | PostgreSQL `SERIAL` sequences are reset to `MAX(id)` after bulk insert | +| **Verification** | Optional row-count comparison between source and target | +| **Live progress** | Spectre.Console progress bars with per-table rate tracking | +| **Graceful cancel** | `Ctrl+C` finishes the current batch and saves checkpoint | + +## Prerequisites + +- .NET 10 SDK +- Source: MSSQL database (SQL Server 2019+) +- Target: PostgreSQL database (16+) **with schema already created** + +> **Important:** The target PostgreSQL database must already have all tables created (e.g., via sensenet's installation process). The migrator only copies **data**, not schema. 
+ +## Quick Start + +```bash +# 1. Edit connection strings +cp appsettings.json appsettings.local.json +nano appsettings.local.json + +# 2. Run +dotnet run +``` + +## Configuration + +Edit `appsettings.json` (or create `appsettings.local.json` for local overrides): + +```json +{ + "Migration": { + "Source": { + "Provider": "MsSql", + "ConnectionString": "Server=localhost,1433;Database=sensenet;User Id=sa;Password=...;TrustServerCertificate=true" + }, + "Target": { + "Provider": "PostgreSql", + "ConnectionString": "Host=localhost;Port=5432;Database=sensenet;Username=postgres;Password=..." + }, + "BatchSize": 5000, + "BlobBatchSize": 50, + "CheckpointFile": "./migration-checkpoint.json", + "SkipTables": [], + "TruncateTarget": true, + "DisableForeignKeys": true, + "FixSequences": true, + "Verify": true + } +} +``` + +### Settings + +| Setting | Default | Description | +|---|---|---| +| `BatchSize` | 5000 | Rows per batch for regular tables | +| `BlobBatchSize` | 50 | Rows per batch for blob tables (Files, EFMessages) | +| `CheckpointFile` | `./migration-checkpoint.json` | Path for checkpoint/resume file | +| `SkipTables` | `[]` | Tables to skip (e.g., `["LogEntries", "StatisticalData"]`) | +| `TruncateTarget` | `true` | Truncate target tables before migration | +| `DisableForeignKeys` | `true` | Disable FK constraints during migration | +| `FixSequences` | `true` | Reset SERIAL sequences to MAX(id) after migration | +| `Verify` | `true` | Verify row counts after migration | + +Settings can also be overridden via: +- Environment variables: `SNMIGRATE_Migration__BatchSize=10000` +- Command line: `dotnet run -- --Migration:BatchSize=10000` + +## Migration Pipeline + +``` +1. Pre-flight → Test connectivity to both databases +2. Discover → Determine which tables exist in source & target +3. Disable FKs → ALTER TABLE ... DISABLE TRIGGER ALL +4. Truncate → TRUNCATE TABLE ... CASCADE (if configured) +5. 
Migrate → Table-by-table in FK-dependency order + ├── Regular tables: COPY BINARY (batched by ID) + └── Blob tables: Parameterized INSERT (streaming) +6. Fix sequences → SELECT setval(seq, MAX(id)) +7. Enable FKs → ALTER TABLE ... ENABLE TRIGGER ALL + VALIDATE +8. Verify → Compare row counts source vs target +``` + +## Table Migration Order + +Tables are processed in FK-dependency order (parents first): + +1. `SchemaModification`, `PropertyTypes`, `ContentListTypes`, `NodeTypes` +2. `Nodes`, `Versions` +3. `LongTextProperties`, `ReferenceProperties` +4. `Files` *(blob)*, `BinaryProperties` +5. `TreeLocks`, `LogEntries`, `IndexingActivities`, `Packages`, `AccessTokens`, `SharedLocks` +6. `EFEntities`, `EFEntries`, `EFMemberships`, `EFMessages` *(blob)* +7. `StatisticalData`, `StatisticalAggregations`, `ExclusiveLocks` +8. `ClientApps`, `ClientSecrets` +9. `JournalItems`, `WorkflowNotification` *(optional)* + +## Type Mapping + +| MSSQL | PostgreSQL | Notes | +|---|---|---| +| `int IDENTITY` | `SERIAL` | Sequence reset after migration | +| `tinyint` | `SMALLINT` | | +| `bit` | `BOOLEAN` or `SMALLINT` | Depends on table (see code) | +| `datetime2` | `TIMESTAMP WITHOUT TIME ZONE` | | +| `timestamp` (rowversion) | `BIGINT` | Byte-reversed conversion | +| `uniqueidentifier` | `UUID` | | +| `nvarchar(N)` | `VARCHAR(N)` | | +| `nvarchar(MAX)` | `TEXT` | | +| `nvarchar(450) CI` | `CITEXT` | Only `Nodes.Path` | +| `varbinary(MAX)` | `BYTEA` | Streamed for blob tables | + +## Resume After Failure + +If the migration fails or is cancelled (`Ctrl+C`), a checkpoint file is saved. On next run, you'll be prompted to resume: + +``` +⚠ A checkpoint file exists. Resume previous migration? [Y/n] +``` + +The checkpoint tracks which tables have been completed and the last processed ID for the table that was in progress. 
+ +## Skipping Tables + +For large databases, you may want to skip non-essential tables: + +```json +{ + "Migration": { + "SkipTables": ["LogEntries", "StatisticalData", "StatisticalAggregations", "IndexingActivities"] + } +} +``` + +## File Overview + +``` +tools/SnDbMigrator/ +├── SnDbMigrator.csproj # .NET 10 project file +├── appsettings.json # Default configuration +├── Program.cs # Entry point, config loading, banner +├── MigrationOptions.cs # Strongly-typed configuration +├── MigrationEngine.cs # Main orchestrator (8-step pipeline) +├── TableMigrator.cs # Per-table migration (COPY BINARY / INSERT) +├── SenseNetSchema.cs # Table definitions, FK list, migration order +├── SequenceFixer.cs # PostgreSQL SERIAL sequence reset +├── ForeignKeyManager.cs # FK constraint disable/enable +├── Checkpoint.cs # JSON checkpoint for resume support +└── README.md # This file +``` diff --git a/tools/SnDbMigrator/SenseNetSchema.cs b/tools/SnDbMigrator/SenseNetSchema.cs new file mode 100644 index 000000000..878ef611b --- /dev/null +++ b/tools/SnDbMigrator/SenseNetSchema.cs @@ -0,0 +1,103 @@ +namespace SnDbMigrator; + +/// +/// Defines the sensenet database schema: table order, columns, identity columns, +/// blob columns, and FK relationships. Used by the migrator to process tables +/// in the correct order and handle special column types. +/// +public static class SenseNetSchema +{ + /// + /// Tables in FK-dependency order (parents before children). + /// Tables without FKs can appear in any order. 
+ /// + public static readonly TableDef[] Tables = + [ + // ── Schema & type definitions (no FKs) ───────────────── + new("SchemaModification", "SchemaModificationId"), + new("PropertyTypes", "PropertyTypeId"), + new("ContentListTypes", "ContentListTypeId"), + new("NodeTypes", "NodeTypeId"), + + // ── Core content tree ────────────────────────────────── + new("Nodes", "NodeId"), + new("Versions", "VersionId"), + + // ── Property storage ─────────────────────────────────── + new("LongTextProperties", "LongTextPropertyId"), + new("ReferenceProperties", "ReferencePropertyId"), + + // ── Files & binaries (BLOB!) ─────────────────────────── + new("Files", "FileId", IsBlob: true, BlobColumns: ["Stream"]), + new("BinaryProperties", "BinaryPropertyId"), + + // ── Operational tables ───────────────────────────────── + new("TreeLocks", "TreeLockId"), + new("LogEntries", "LogId"), + new("IndexingActivities", "IndexingActivityId"), + new("Packages", "Id"), + new("AccessTokens", "AccessTokenId"), + new("SharedLocks", "SharedLockId"), + + // ── Security tables ──────────────────────────────────── + new("EFEntities", IdentityColumn: null), // PK is Id but NOT auto-increment + new("EFEntries", IdentityColumn: null), // Composite PK + new("EFMemberships", IdentityColumn: null), // Composite PK + new("EFMessages", "Id", IsBlob: true, BlobColumns: ["Body"]), + + // ── Statistics & locks ───────────────────────────────── + new("StatisticalData", "Id"), + new("StatisticalAggregations", IdentityColumn: null), // Composite PK + new("ExclusiveLocks", "Id"), + + // ── Client apps ──────────────────────────────────────── + new("ClientApps", IdentityColumn: null), // PK is varchar + new("ClientSecrets", IdentityColumn: null), // PK is varchar + + // ── Journal & workflow (may not exist) ───────────────── + new("JournalItems", "Id", Optional: true), + new("WorkflowNotification", "NotificationId", Optional: true), + ]; + + /// + /// FK constraints that must be disabled before migration + /// 
and re-enabled after. Order: child → parent. + /// + public static readonly FkDef[] ForeignKeys = + [ + new("NodeTypes", "FK_NodeTypes_NodeTypes"), + new("Nodes", "FK_Nodes_NodeTypes"), + new("Nodes", "FK_Nodes_Parent"), + new("Nodes", "FK_Nodes_LockedBy"), + new("Nodes", "FK_Nodes_Nodes_CreatedById"), + new("Nodes", "FK_Nodes_Nodes_ModifiedById"), + new("Nodes", "FK_Nodes_Nodes_ContentListId"), + new("Versions", "FK_Versions_Nodes"), + new("Versions", "FK_Versions_Nodes_CreatedBy"), + new("Versions", "FK_Versions_Nodes_ModifiedBy"), + new("BinaryProperties", "FK_BinaryProperties_PropertyTypes"), + new("BinaryProperties", "FK_BinaryProperties_Versions"), + new("BinaryProperties", "FK_BinaryProperties_Files"), + new("ReferenceProperties", "FK_ReferenceProperties_PropertyTypes"), + new("LongTextProperties", "FK_LongTextProperties_PropertyTypes"), + new("LongTextProperties", "FK_LongTextProperties_Versions"), + new("EFEntities", "FK_EFEntities_EFEntities_ParentId"), + new("EFEntries", "FK_EFEntries_EFEntities_EFEntityId"), + ]; +} + +/// Table name (case-sensitive for PostgreSQL). +/// IDENTITY/SERIAL column name, or null if no auto-increment PK. +/// True if the table contains large binary columns. +/// Names of VARBINARY(MAX)/BYTEA columns. +/// True if the table may not exist in older installations. +public record TableDef( + string Name, + string? IdentityColumn = null, + bool IsBlob = false, + string[]? BlobColumns = null, + bool Optional = false); + +/// Table that owns the FK constraint. +/// Name of the FK constraint. +public record FkDef(string Table, string ConstraintName); diff --git a/tools/SnDbMigrator/SequenceFixer.cs b/tools/SnDbMigrator/SequenceFixer.cs new file mode 100644 index 000000000..d012de486 --- /dev/null +++ b/tools/SnDbMigrator/SequenceFixer.cs @@ -0,0 +1,79 @@ +using Npgsql; + +namespace SnDbMigrator; + +/// +/// Fixes PostgreSQL SERIAL sequences after bulk data migration. 
+/// When rows are inserted with explicit IDs (via COPY or INSERT with +/// overridden identity), the sequence stays at 1. This class resets +/// each sequence to MAX(id) + 1 so that new inserts get the correct ID. +/// +public sealed class SequenceFixer +{ + private readonly string _connectionString; + + public SequenceFixer(string connectionString) + { + _connectionString = connectionString; + } + + /// + /// Fixes the sequence for a single table's SERIAL column. + /// + public async Task FixSequenceAsync(string tableName, string identityColumn, + CancellationToken ct) + { + await using var conn = new NpgsqlConnection(_connectionString); + await conn.OpenAsync(ct); + + // Find the sequence name for this column + var seqName = await GetSequenceName(conn, tableName, identityColumn, ct); + if (seqName == null) + return 0; + + // Get current max ID + await using var maxCmd = new NpgsqlCommand( + $"SELECT COALESCE(MAX(\"{identityColumn}\"), 0) FROM \"{tableName}\"", conn); + var maxId = Convert.ToInt64(await maxCmd.ExecuteScalarAsync(ct)); + + if (maxId <= 0) + return 0; + + // Reset the sequence + await using var setCmd = new NpgsqlCommand( + $"SELECT setval('{seqName}', @maxId)", conn); + setCmd.Parameters.AddWithValue("@maxId", maxId); + await setCmd.ExecuteScalarAsync(ct); + + return maxId; + } + + /// + /// Fixes sequences for all tables that have identity columns. + /// Returns a dictionary of table → new sequence value. 
+ /// + public async Task> FixAllSequencesAsync( + IEnumerable tables, CancellationToken ct) + { + var results = new Dictionary(); + foreach (var table in tables) + { + if (table.IdentityColumn == null) continue; + + var newVal = await FixSequenceAsync(table.Name, table.IdentityColumn, ct); + results[table.Name] = newVal; + } + return results; + } + + private static async Task GetSequenceName(NpgsqlConnection conn, + string tableName, string columnName, CancellationToken ct) + { + await using var cmd = new NpgsqlCommand( + "SELECT pg_get_serial_sequence(@table, @column)", conn); + cmd.Parameters.AddWithValue("@table", tableName); + cmd.Parameters.AddWithValue("@column", columnName); + var result = await cmd.ExecuteScalarAsync(ct); + return result as string; + } +} diff --git a/tools/SnDbMigrator/SnDbMigrator.csproj b/tools/SnDbMigrator/SnDbMigrator.csproj new file mode 100644 index 000000000..24b4962fd --- /dev/null +++ b/tools/SnDbMigrator/SnDbMigrator.csproj @@ -0,0 +1,29 @@ + + + + Exe + net10.0 + enable + enable + SnDbMigrator + SnDbMigrator + + + + + + + + + + + + + + + + PreserveNewest + + + + diff --git a/tools/SnDbMigrator/TableMigrator.cs b/tools/SnDbMigrator/TableMigrator.cs new file mode 100644 index 000000000..f6d6a8c0a --- /dev/null +++ b/tools/SnDbMigrator/TableMigrator.cs @@ -0,0 +1,487 @@ +using System.Data; +using System.Diagnostics; +using Microsoft.Data.SqlClient; +using Npgsql; +using NpgsqlTypes; + +namespace SnDbMigrator; + +/// +/// Migrates a single table from MSSQL to PostgreSQL using streaming reads +/// and PostgreSQL COPY BINARY for maximum throughput. Blob columns are +/// streamed without loading entire values into memory. 
+/// +public sealed class TableMigrator +{ + private readonly MigrationOptions _options; + private readonly Checkpoint _checkpoint; + private readonly Action _onProgress; // tableName, rowsDone, totalRows + + // Columns whose MSSQL type differs from PG and need conversion + private static readonly HashSet BooleanColumns = new(StringComparer.OrdinalIgnoreCase) + { + "Staging", "IsDeleted", "Hidden", "IsInherited", "LocalOnly", "IsUser" + }; + + // MSSQL [timestamp] / rowversion columns → PG BIGINT + private static readonly HashSet RowVersionColumns = new(StringComparer.OrdinalIgnoreCase) + { + "Timestamp" + }; + + // MSSQL UNIQUEIDENTIFIER columns → PG UUID + private static readonly HashSet GuidColumns = new(StringComparer.OrdinalIgnoreCase) + { + "RowGuid", "WorkflowInstanceId" + }; + + public TableMigrator(MigrationOptions options, Checkpoint checkpoint, + Action onProgress) + { + _options = options; + _checkpoint = checkpoint; + _onProgress = onProgress; + } + + /// + /// Migrates one table. Returns the number of rows migrated. + /// + public async Task MigrateTableAsync(TableDef table, CancellationToken ct) + { + if (_checkpoint.IsTableCompleted(table.Name)) + return _checkpoint.Tables[table.Name].RowsMigrated; + + var totalRows = await GetSourceRowCount(table.Name, ct); + _onProgress(table.Name, 0, totalRows); + + if (totalRows == 0) + { + _checkpoint.MarkTableCompleted(table.Name, 0); + return 0; + } + + // Get column metadata from source + var columns = await GetColumnMetadata(table.Name, ct); + + if (table.IsBlob) + return await MigrateBlobTableAsync(table, columns, totalRows, ct); + else + return await MigrateRegularTableAsync(table, columns, totalRows, ct); + } + + /// + /// Regular (non-blob) table migration using PostgreSQL COPY BINARY for max throughput. 
private async Task<long> MigrateRegularTableAsync(
parameterized INSERTs with streaming to avoid + /// loading entire BLOBs into memory. + /// + private async Task MigrateBlobTableAsync( + TableDef table, ColumnMeta[] columns, long totalRows, CancellationToken ct) + { + long rowsDone = 0; + var batchSize = _options.BlobBatchSize; // smaller batches for blob tables + var blobCols = new HashSet(table.BlobColumns ?? [], StringComparer.OrdinalIgnoreCase); + + if (table.IdentityColumn == null) + throw new InvalidOperationException($"Blob table '{table.Name}' must have an identity column."); + + var lastId = _checkpoint.GetLastId(table.Name); + var maxId = await GetMaxId(table.Name, table.IdentityColumn, ct); + var colNames = columns.Select(c => $"\"{c.Name}\"").ToArray(); + var paramNames = columns.Select((c, i) => $"@p{i}").ToArray(); + var insertSql = $"INSERT INTO \"{table.Name}\" ({string.Join(", ", colNames)}) " + + $"VALUES ({string.Join(", ", paramNames)})"; + + while (lastId < maxId) + { + ct.ThrowIfCancellationRequested(); + var batchEnd = lastId + batchSize; + + var sql = $"SELECT * FROM [{table.Name}] WHERE [{table.IdentityColumn}] > @lastId " + + $"AND [{table.IdentityColumn}] <= @batchEnd ORDER BY [{table.IdentityColumn}]"; + + await using var srcConn = new SqlConnection(_options.Source.ConnectionString); + await srcConn.OpenAsync(ct); + await using var cmd = new SqlCommand(sql, srcConn); + cmd.Parameters.AddWithValue("@lastId", lastId); + cmd.Parameters.AddWithValue("@batchEnd", batchEnd); + cmd.CommandTimeout = 600; + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess, ct); + + await using var pgConn = new NpgsqlConnection(_options.Target.ConnectionString); + await pgConn.OpenAsync(ct); + + while (await reader.ReadAsync(ct)) + { + await using var insert = new NpgsqlCommand(insertSql, pgConn); + insert.CommandTimeout = 300; + + for (int i = 0; i < columns.Length; i++) + { + var col = columns[i]; + if (reader.IsDBNull(i)) + { + insert.Parameters.AddWithValue($"@p{i}", 
DBNull.Value); + } + else if (blobCols.Contains(col.Name)) + { + // Stream blob data + var blobBytes = (byte[])reader.GetValue(i); + insert.Parameters.AddWithValue($"@p{i}", blobBytes); + } + else + { + var value = ConvertValue(col, reader, i, table.Name); + insert.Parameters.AddWithValue($"@p{i}", value); + } + } + + await insert.ExecuteNonQueryAsync(ct); + rowsDone++; + } + + lastId = batchEnd; + _checkpoint.MarkTableProgress(table.Name, lastId, rowsDone); + _onProgress(table.Name, rowsDone, totalRows); + } + + _checkpoint.MarkTableCompleted(table.Name, rowsDone); + return rowsDone; + } + + /// + /// Writes rows from SqlDataReader to PostgreSQL using COPY BINARY. + /// Returns the number of rows written. + /// + private async Task CopyBatchToPostgres( + TableDef table, ColumnMeta[] columns, SqlDataReader reader, CancellationToken ct) + { + long rows = 0; + var colNames = string.Join(", ", columns.Select(c => $"\"{c.Name}\"")); + var copySql = $"COPY \"{table.Name}\" ({colNames}) FROM STDIN (FORMAT BINARY)"; + + await using var pgConn = new NpgsqlConnection(_options.Target.ConnectionString); + await pgConn.OpenAsync(ct); + + await using var writer = await pgConn.BeginBinaryImportAsync(copySql, ct); + + while (await reader.ReadAsync(ct)) + { + await writer.StartRowAsync(ct); + for (int i = 0; i < columns.Length; i++) + { + var col = columns[i]; + if (reader.IsDBNull(i)) + { + await writer.WriteNullAsync(ct); + } + else + { + await WriteBinaryValue(writer, col, reader, i, table.Name, ct); + } + } + rows++; + } + + await writer.CompleteAsync(ct); + return rows; + } + + /// + /// Writes a single value to the NpgsqlBinaryImporter with proper type mapping. 
/// </summary>
/// <param name="writer">Open binary COPY importer positioned at the current column.</param>
/// <param name="col">Source column metadata (name and MSSQL data type).</param>
/// <param name="reader">Source reader positioned on the current row.</param>
/// <param name="ordinal">Column ordinal in the reader.</param>
/// <param name="tableName">Source table name (drives per-table mapping rules).</param>
/// <param name="ct">Cancellation token.</param>
private async Task WriteBinaryValue(NpgsqlBinaryImporter writer,
    ColumnMeta col, SqlDataReader reader, int ordinal, string tableName,
    CancellationToken ct)
{
    // MSSQL timestamp (rowversion) → PG BIGINT
    // NOTE(review): rowversion bytes are big-endian; Reverse() assumes a
    // little-endian host. Correct on x64/ARM64, would misread on big-endian.
    if (RowVersionColumns.Contains(col.Name) && col.SqlType == "timestamp")
    {
        var bytes = (byte[])reader.GetValue(ordinal);
        var longVal = BitConverter.ToInt64(bytes.Reverse().ToArray(), 0);
        await writer.WriteAsync(longVal, NpgsqlDbType.Bigint, ct);
        return;
    }

    // Boolean conversion: bit → BOOLEAN for specific columns
    if (col.SqlType == "bit" && IsBooleanInPostgres(col.Name, tableName))
    {
        var val = reader.GetBoolean(ordinal);
        await writer.WriteAsync(val, NpgsqlDbType.Boolean, ct);
        return;
    }

    // tinyint → SMALLINT (PG has no unsigned 1-byte integer)
    if (col.SqlType == "tinyint")
    {
        var val = reader.GetByte(ordinal);
        await writer.WriteAsync((short)val, NpgsqlDbType.Smallint, ct);
        return;
    }

    // UNIQUEIDENTIFIER → UUID
    if (col.SqlType == "uniqueidentifier")
    {
        var val = reader.GetGuid(ordinal);
        await writer.WriteAsync(val, NpgsqlDbType.Uuid, ct);
        return;
    }

    // datetime / datetime2 → TIMESTAMP (without time zone)
    if (col.SqlType is "datetime" or "datetime2")
    {
        var val = reader.GetDateTime(ordinal);
        await writer.WriteAsync(val, NpgsqlDbType.Timestamp, ct);
        return;
    }

    // ntext / nvarchar(max) → TEXT, nvarchar(N) → VARCHAR
    if (col.SqlType is "ntext" or "nvarchar" or "varchar" or "nchar")
    {
        var val = reader.GetString(ordinal);
        // CITEXT for Nodes.Path (case-insensitive path lookups in PG schema)
        if (tableName == "Nodes" && col.Name == "Path")
            await writer.WriteAsync(val, NpgsqlDbType.Citext, ct);
        else
            await writer.WriteAsync(val, NpgsqlDbType.Text, ct);
        return;
    }

    // int → INT
    if (col.SqlType == "int")
    {
        await writer.WriteAsync(reader.GetInt32(ordinal), NpgsqlDbType.Integer, ct);
        return;
    }

    // bigint → BIGINT
    if (col.SqlType == "bigint")
    {
        await writer.WriteAsync(reader.GetInt64(ordinal), NpgsqlDbType.Bigint, ct);
        return;
    }

    // smallint → SMALLINT
    if (col.SqlType == "smallint")
    {
        await writer.WriteAsync(reader.GetInt16(ordinal), NpgsqlDbType.Smallint, ct);
        return;
    }

    // bit → SMALLINT (for legacy sensenet columns that use SMALLINT in PG).
    // Must come after the IsBooleanInPostgres check above.
    if (col.SqlType == "bit")
    {
        var val = reader.GetBoolean(ordinal);
        await writer.WriteAsync(val ? (short)1 : (short)0, NpgsqlDbType.Smallint, ct);
        return;
    }

    // varbinary / image → BYTEA (should not hit here for regular tables)
    if (col.SqlType is "varbinary" or "image")
    {
        var val = (byte[])reader.GetValue(ordinal);
        await writer.WriteAsync(val, NpgsqlDbType.Bytea, ct);
        return;
    }

    // Fallback: write as-is and let Npgsql figure it out
    var fallback = reader.GetValue(ordinal);
    await writer.WriteAsync(fallback, ct);
}

/// <summary>
/// Converts a value for parameterized INSERT (blob tables).
/// Mirrors the scalar conversions in WriteBinaryValue for rowversion, bit and tinyint;
/// all other types pass through unchanged.
/// </summary>
private object ConvertValue(ColumnMeta col, SqlDataReader reader, int ordinal, string tableName)
{
    // MSSQL timestamp (rowversion) → PG BIGINT
    // NOTE(review): same little-endian host assumption as in WriteBinaryValue.
    if (RowVersionColumns.Contains(col.Name) && col.SqlType == "timestamp")
    {
        var bytes = (byte[])reader.GetValue(ordinal);
        return BitConverter.ToInt64(bytes.Reverse().ToArray(), 0);
    }

    if (col.SqlType == "bit" && IsBooleanInPostgres(col.Name, tableName))
        return reader.GetBoolean(ordinal);

    if (col.SqlType == "bit")
        return reader.GetBoolean(ordinal) ? (short)1 : (short)0;

    if (col.SqlType == "tinyint")
        return (short)reader.GetByte(ordinal);

    return reader.GetValue(ordinal);
}

/// <summary>
/// Determines whether a bit column should be mapped to BOOLEAN (true) or SMALLINT (false)
/// in PostgreSQL, based on the actual PG schema.
/// </summary>
private static bool IsBooleanInPostgres(string columnName, string tableName)
{
    // Files: Staging, IsDeleted → BOOLEAN
    if (tableName == "Files" && columnName is "Staging" or "IsDeleted") return true;
    // JournalItems: Hidden → BOOLEAN
    if (tableName == "JournalItems" && columnName == "Hidden") return true;
    // EFEntities: IsInherited → BOOLEAN
    if (tableName == "EFEntities" && columnName == "IsInherited") return true;
    // EFEntries: LocalOnly → BOOLEAN
    if (tableName == "EFEntries" && columnName == "LocalOnly") return true;
    // EFMemberships: IsUser → BOOLEAN
    if (tableName == "EFMemberships" && columnName == "IsUser") return true;
    // Nodes: all bit columns → SMALLINT (legacy)
    return false;
}

/// <summary>Truncates a table in PostgreSQL (CASCADE).</summary>
/// <remarks>tableName comes from the internal table list, not from user input.</remarks>
public async Task TruncateTableAsync(string tableName, CancellationToken ct)
{
    await using var conn = new NpgsqlConnection(_options.Target.ConnectionString);
    await conn.OpenAsync(ct);
    await using var cmd = new NpgsqlCommand(
        $"TRUNCATE TABLE \"{tableName}\" CASCADE", conn);
    cmd.CommandTimeout = 120;
    await cmd.ExecuteNonQueryAsync(ct);
}

// ── Helpers ─────────────────────────────────────────────────

/// <summary>Returns COUNT_BIG(*) of the source MSSQL table.</summary>
private async Task<long> GetSourceRowCount(string tableName, CancellationToken ct)
{
    await using var conn = new SqlConnection(_options.Source.ConnectionString);
    await conn.OpenAsync(ct);
    await using var cmd = new SqlCommand(
        $"SELECT COUNT_BIG(*) FROM [{tableName}]", conn);
    cmd.CommandTimeout = 120;
    return (long)(await cmd.ExecuteScalarAsync(ct))!;
}

/// <summary>Returns MAX of the given id column in the source table, or 0 when empty.</summary>
private async Task<long> GetMaxId(string tableName, string idCol, CancellationToken ct)
{
    await using var conn = new SqlConnection(_options.Source.ConnectionString);
    await conn.OpenAsync(ct);
    await using var cmd = new SqlCommand(
        $"SELECT ISNULL(MAX([{idCol}]), 0) FROM [{tableName}]", conn);
    cmd.CommandTimeout = 60;
    // Convert.ToInt64 handles both INT and BIGINT id columns.
    return Convert.ToInt64(await cmd.ExecuteScalarAsync(ct));
}

/// <summary>Gets column metadata from
/// the source MSSQL table.</summary>
private async Task<ColumnMeta[]> GetColumnMetadata(string tableName, CancellationToken ct)
{
    await using var conn = new SqlConnection(_options.Source.ConnectionString);
    await conn.OpenAsync(ct);
    // Use schema query to get column order and types
    await using var cmd = new SqlCommand(
        @"SELECT c.COLUMN_NAME, c.DATA_TYPE, c.CHARACTER_MAXIMUM_LENGTH,
                 c.ORDINAL_POSITION
          FROM INFORMATION_SCHEMA.COLUMNS c
          WHERE c.TABLE_NAME = @table AND c.TABLE_SCHEMA = 'dbo'
          ORDER BY c.ORDINAL_POSITION", conn);
    cmd.Parameters.AddWithValue("@table", tableName);
    await using var reader = await cmd.ExecuteReaderAsync(ct);

    var cols = new List<ColumnMeta>();
    while (await reader.ReadAsync(ct))
    {
        cols.Add(new ColumnMeta(
            reader.GetString(0),                                    // COLUMN_NAME
            reader.GetString(1),                                    // DATA_TYPE
            reader.IsDBNull(2) ? (int?)null : reader.GetInt32(2))); // CHARACTER_MAXIMUM_LENGTH
    }
    return cols.ToArray();
}

/// <summary>Checks if a table exists in the source MSSQL database.</summary>
public async Task<bool> SourceTableExistsAsync(string tableName, CancellationToken ct)
{
    await using var conn = new SqlConnection(_options.Source.ConnectionString);
    await conn.OpenAsync(ct);
    await using var cmd = new SqlCommand(
        "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = @table AND TABLE_SCHEMA = 'dbo'",
        conn);
    cmd.Parameters.AddWithValue("@table", tableName);
    // MSSQL COUNT(*) is INT.
    return (int)(await cmd.ExecuteScalarAsync(ct))! > 0;
}

/// <summary>Checks if a table exists in the target PostgreSQL database.</summary>
public async Task<bool> TargetTableExistsAsync(string tableName, CancellationToken ct)
{
    await using var conn = new NpgsqlConnection(_options.Target.ConnectionString);
    await conn.OpenAsync(ct);
    await using var cmd = new NpgsqlCommand(
        "SELECT COUNT(*) FROM information_schema.tables WHERE table_name = @table AND table_schema = 'public'",
        conn);
    cmd.Parameters.AddWithValue("@table", tableName);
    // PostgreSQL count() is BIGINT.
    return (long)(await cmd.ExecuteScalarAsync(ct))! > 0;
}

/// <summary>Gets the row count from the target PostgreSQL table.</summary>
public async Task<long> GetTargetRowCount(string tableName, CancellationToken ct)
{
    await using var conn = new NpgsqlConnection(_options.Target.ConnectionString);
    await conn.OpenAsync(ct);
    await using var cmd = new NpgsqlCommand(
        $"SELECT COUNT(*) FROM \"{tableName}\"", conn);
    cmd.CommandTimeout = 120;
    return (long)(await cmd.ExecuteScalarAsync(ct))!;
}
}

/// <summary>Column metadata from INFORMATION_SCHEMA.</summary>
public record ColumnMeta(string Name, string SqlType, int? MaxLength);
diff --git a/tools/SnDbMigrator/appsettings.json b/tools/SnDbMigrator/appsettings.json
new file mode 100644
index 000000000..d3c537c7c
--- /dev/null
+++ b/tools/SnDbMigrator/appsettings.json
@@ -0,0 +1,20 @@
+{
+  "Migration": {
+    "Source": {
+      "Provider": "MsSql",
+      "ConnectionString": "Server=localhost,9999;Database=sensenet-sndb;User Id=sa;Password=SuP3rS3CuR3P4sSw0Rd;TrustServerCertificate=true"
+    },
+    "Target": {
+      "Provider": "PostgreSql",
+      "ConnectionString": "Host=localhost;Port=5532;Database=sensenet-sndb;Username=postgres;Password=SuP3rS3CuR3P4sSw0Rd"
+    },
+    "BatchSize": 5000,
+    "BlobBatchSize": 50,
+    "CheckpointFile": "./migration-checkpoint.json",
+    "SkipTables": [],
+    "TruncateTarget": true,
+    "DisableForeignKeys": true,
+    "FixSequences": true,
+    "Verify": true
+  }
+}