-
Notifications
You must be signed in to change notification settings - Fork 278
Expand file tree
/
Copy path.env.gcp.template
More file actions
134 lines (114 loc) · 5.29 KB
/
.env.gcp.template
File metadata and controls
134 lines (114 loc) · 5.29 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
# ------------------------------------------- Required block -----------------------------------------------------------
# These variables are required and don't have default values
# You must provide values for these to run the deployment successfully
# your GCP project ID
GCP_PROJECT_ID=
# GCP region where the resources will be deployed, e.g. us-west1
GCP_REGION=
# GCP zone where the resources will be deployed, e.g. us-west1-a
GCP_ZONE=
# your domain or subdomain name, e.g. great-innovations.dev, e2b.great-innovations.dev
DOMAIN_NAME=
# your Postgres connection string
# for Supabase, the format is postgresql://postgres.<project-ref>:<password>@<your-full-url-domain.com>:<port-number>/postgres
POSTGRES_CONNECTION_STRING=
# ------------------------------------------- Required with defaults ---------------------------------------------------
# These variables need to be specified but have default values
PROVIDER=gcp
# prefix identifier for all resources
PREFIX=e2b-
# prod, staging, dev
TERRAFORM_ENVIRONMENT=dev
# Cluster configuration
# {
# "cluster_size": 1, // Number of nodes (the actual number of nodes may be higher due to autoscaling)
# "machine": { // Machine type and CPU platform
# "type": "n1-standard-8",
# "min_cpu_platform": "Intel Skylake"
# },
# "autoscaler": {
# "size_max": 1, // Maximum number of nodes to scale up to
# "memory_target": 100, // Target memory utilization percentage for autoscaling (0-100)
# "cpu_target": 0.7 // Target CPU utilization fraction for autoscaling (0-1)
# },
# "boot_disk": {
# "disk_type": "pd-ssd", // Boot disk type (only persistent disks are currently supported)
# "size_gb": 100 // Boot disk size in GB
# },
# "cache_disks": {
# "disk_type": "local-ssd", // Cache disk type (local-ssd or pd-ssd)
# "size_gb": 375, // Cache disk size in GB
# "count": 1 // Number of cache disks
# }
# }
# Build cluster configuration
# Example:
BUILD_CLUSTERS_CONFIG='{"default": {"cluster_size": 1, "machine":{"type":"n1-standard-8","min_cpu_platform":"Intel Skylake"}, "boot_disk":{"disk_type":"pd-ssd","size_gb":200}, "cache_disks":{"disk_type":"local-ssd","size_gb":375,"count":1}}}'
# Client cluster configuration
# Example:
CLIENT_CLUSTERS_CONFIG='{"default": {"cluster_size": 1, "hugepages_percentage": 80, "machine":{"type":"n1-standard-8","min_cpu_platform":"Intel Skylake"}, "autoscaler": {"size_max": 2, "memory_target": 100, "cpu_target": 0.7}, "boot_disk":{"disk_type":"pd-ssd","size_gb":200}, "cache_disks":{"disk_type":"local-ssd","size_gb":375,"count":1}}}'
# This is the nomad and consul server (only for scheduling and service discovery)
# eg e2-standard-2
SERVER_MACHINE_TYPE=e2-standard-2
# e.g. 3
SERVER_CLUSTER_SIZE=3
# e.g. e2-standard-4
API_MACHINE_TYPE=e2-standard-4
# e.g. 1
API_CLUSTER_SIZE=1
# e.g. e2-standard-4
CLICKHOUSE_MACHINE_TYPE=e2-standard-4
# e.g. 1
CLICKHOUSE_CLUSTER_SIZE=1
# ------------------------------------------- Optional block -----------------------------------------------------------
# The following variables are optional and don't have to be set
# Dashboard API instance count (default: 0)
DASHBOARD_API_COUNT=
# Dashboard API Supabase DB connection string (default: POSTGRES_CONNECTION_STRING)
SUPABASE_DB_CONNECTION_STRING=
# Enable dashboard-api auth user sync background worker (default: false)
ENABLE_AUTH_USER_SYNC_BACKGROUND_WORKER=
# Filestore cache for builds shared across the cluster (default: false)
FILESTORE_CACHE_ENABLED=
# BASIC_HDD for staging+dev, ZONAL for production
FILESTORE_CACHE_TIER=
# 1024 GB for staging+dev, 1024 GB or 10240 GB for production (10240 is different zonal capacity tier)
FILESTORE_CACHE_CAPACITY_GB=
# Boot disk types (pd-ssd, pd-balanced, pd-standard, pd-extreme), only some values can be based on the machine type
# Default: pd-ssd
API_BOOT_DISK_TYPE=
SERVER_BOOT_DISK_TYPE=
CLICKHOUSE_BOOT_DISK_TYPE=
LOKI_BOOT_DISK_TYPE=
# Managed Redis (default: false)
REDIS_MANAGED=
# Redis shard count (default: 1)
REDIS_SHARD_COUNT=
# GCP managed Redis/Valkey engine version (default: VALKEY_8_0)
GCP_REDIS_ENGINE_VERSION=
# Template bucket name (if you want to use a different bucket for templates than the default one)
# TEMPLATE_BUCKET_NAME=
# Default persistent volume type (default: "")
DEFAULT_PERSISTENT_VOLUME_TYPE=
# Persistent volume types (default: {})
PERSISTENT_VOLUME_TYPES=
# -------------------------------------- Variables for integration tests -----------------------------------------------
# Hash seed used for generating sandbox access tokens, not needed if you are not using them
SANDBOX_ACCESS_TOKEN_HASH_SEED=abcdefghijklmnopqrstuvwxyz
# Integration tests variables (only for running integration tests locally)
# your domain name, e.g. https://api.great-innovations.dev
TESTS_API_SERVER_URL=
# Host of the orchestrator, e.g. localhost:5008
# If connecting remotely, you might need to bridge the orchestrator connection as it's not publicly available
TESTS_ORCHESTRATOR_HOST=
# Envd proxy, e.g. https://client-proxy.great-innovations.dev
# This can be either session proxy or client proxy, depending on your local setup
TESTS_ENVD_PROXY=
# your sandbox template ID, e.g. base
TESTS_SANDBOX_TEMPLATE_ID=base
# your Team API key
TESTS_E2B_API_KEY=
# your access token
TESTS_E2B_ACCESS_TOKEN=
# your user id
TESTS_SANDBOX_USER_ID=