Configuration

Server and client configuration reference.

Server Configuration

Server configuration is stored in /etc/ebla/server.toml:

[server]
host = "0.0.0.0"
port = 6333
base_url = "https://files.example.com"

[database]
url = "postgres://ebla:secret@localhost/ebla"

[storage]
backend = "filesystem"  # or "s3", "minio", "gcs", "azure"
path = "/var/lib/ebla/blocks"

# For S3-compatible storage (AWS S3, MinIO, DigitalOcean Spaces, Backblaze B2):
# backend = "s3"
# bucket = "ebla-blocks"
# region = "us-east-1"
# access_key = "AKIA..."
# secret_key = "..."

# For Google Cloud Storage:
# backend = "gcs"
# bucket = "ebla-blocks"
# credentials_file = "/path/to/service-account.json"

[auth]
jwt_secret = "change-me-to-random-string"  # Replace with a long random value; anyone who knows it can forge tokens
token_expiry = "168h"  # 7 days
allow_signup = true

[logging]
level = "info"
format = "json"

[knowledge]
enabled = true

[knowledge.embedding]
provider = "openai"  # openai, ollama
model = "text-embedding-ada-002"
api_key = "sk-..."

[knowledge.llm]
provider = "openai"  # openai, anthropic, ollama
model = "gpt-4-turbo-preview"
api_key = "sk-..."
temperature = 0.3

[gc]
enabled = true       # Enable automatic garbage collection
interval = "24h"     # Run GC every 24 hours
min_age = "24h"      # Only delete blocks older than 24 hours

[backup]
enabled = true       # Enable automatic backups
interval = "24h"     # Run backup every 24 hours
retention = 7        # Keep 7 most recent backups
path = "/var/lib/ebla/backups"
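
The knowledge providers are not limited to OpenAI. For a fully local setup you can point both sections at Ollama; a minimal sketch (the model names are illustrative, and omitting api_key for Ollama is an assumption):

[knowledge.embedding]
provider = "ollama"
model = "nomic-embed-text"

[knowledge.llm]
provider = "ollama"
model = "llama3"
temperature = 0.3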

Client Configuration

Client configuration is stored in ~/.ebla/config.toml:

[server]
url = "https://files.example.com"

[daemon]
poll_interval = "5m"
log_level = "info"

[pipeline]
# Number of concurrent chunking workers (0 = auto, uses min(NumCPU, 4))
chunk_workers = 0
# Number of concurrent upload workers
upload_workers = 8
# Maximum pending blocks in pipeline (limits memory usage)
max_pending_blocks = 1000
# Number of hashes per existence check request
existence_check_batch_size = 2000
# Max time to wait before flushing a partial existence check batch
existence_check_timeout = "200ms"
# Maximum retries per block upload
max_retries = 3
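
# Example tuning for a slow or metered uplink (illustrative values,
# not recommendations; all keys are documented above):
# upload_workers = 2
# max_pending_blocks = 250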

[pipeline.incremental_commit]
# Enable incremental commits (files appear on server as blocks upload)
enabled = true
# Switch to bulk mode when this many files are pending
file_threshold = 10
# Switch to bulk mode when this many bytes are pending
byte_threshold = "100MB"
# Max time between commits in bulk mode
time_threshold = "5s"
# Maximum files per commit in bulk mode
max_files_per_commit = 50
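
# With the thresholds above, dropping 500 files into a synced folder
# exceeds file_threshold, so the client switches to bulk mode, writing
# at most max_files_per_commit files per commit and committing at
# least every time_threshold.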

[p2p]
enabled = true
lan_only = false  # Set to true for LAN-only mode (no NAT traversal)

# NAT Traversal settings (for P2P across different networks)
[p2p.nat]
enabled = true  # Enable STUN/TURN for NAT traversal

# STUN servers for NAT detection (optional, uses Google STUN by default)
stun_servers = [
  "stun.l.google.com:19302",
  "stun.cloudflare.com:3478"
]

# TURN relay server for symmetric NAT (optional)
# Required when both peers are behind symmetric NAT
turn_server = "turn.example.com:3478"
turn_username = "user"
turn_password = "secret"

# Force TURN relay for debugging (optional)
prefer_relay = false
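
For a trusted office network where peers never need to reach each other across the internet, NAT traversal can be skipped entirely. A minimal sketch using only the keys documented above:

[p2p]
enabled = true
lan_only = true

[p2p.nat]
enabled = false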

Ignore File

Place .eblaignore in the root of any synced folder:

# Patterns (gitignore syntax)
*.tmp
.DS_Store
node_modules/
.git/

# Never sync secrets
.env
*.pem
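
Because patterns follow gitignore syntax, negation with ! should work as well (this assumes Ebla implements full gitignore semantics, including re-inclusion):

# Ignore all logs except one we care about
*.log
!important.log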

Storage Backends

Ebla supports multiple storage backends:

Backend      Provider               Use Case
-----------  ---------------------  --------------------------
filesystem   Local disk             Development, single-server
s3           AWS S3                 Production, scalable
minio        MinIO                  Self-hosted S3-compatible
gcs          Google Cloud Storage   GCP deployments
azure        Azure Blob Storage     Azure deployments

S3-Compatible Storage

Works with AWS S3, MinIO, DigitalOcean Spaces, and Backblaze B2:

[storage]
backend = "s3"
bucket = "ebla-blocks"
region = "us-east-1"
access_key = "AKIA..."
secret_key = "..."
prefix = "blocks"  # Optional key prefix

# For MinIO or custom endpoints:
# endpoint = "localhost:9000"
# use_ssl = false
# force_path_style = true
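
Putting the endpoint comments together, a local MinIO deployment might look like this. The credentials shown are MinIO's well-known defaults (change them in production), and whether region is still required when targeting MinIO is an assumption:

[storage]
backend = "s3"
bucket = "ebla-blocks"
region = "us-east-1"   # some S3 clients require a region even for MinIO
access_key = "minioadmin"
secret_key = "minioadmin"
endpoint = "localhost:9000"
use_ssl = false
force_path_style = true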

Google Cloud Storage

[storage]
backend = "gcs"
bucket = "ebla-blocks"
credentials_file = "/path/to/service-account.json"  # Optional
prefix = "blocks"  # Optional key prefix

Azure Blob Storage

[storage]
backend = "azure"
container_name = "ebla-blocks"
account_name = "yourstorageaccount"
account_key = "..."  # Optional if using Azure AD/managed identity
prefix = "blocks"  # Optional key prefix

TLS Configuration

Option 1: User-provided certificates

[server]
tls_cert = "/etc/ebla/cert.pem"
tls_key = "/etc/ebla/key.pem"

Option 2: Caddy reverse proxy (auto-HTTPS)

Add Caddy to your docker-compose.yml for automatic Let's Encrypt certificates:

services:
  caddy:
    image: caddy:2-alpine
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - caddy_data:/data
    depends_on:
      - ebla-server

# Named volumes must be declared at the top level
volumes:
  caddy_data:   # persists Caddy's certificates across restarts

Caddyfile:

your-domain.com {
  reverse_proxy ebla-server:6333
}
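
Here ebla-server and port 6333 must match the service name and port in your compose file. With a public DNS record pointing at the host, Caddy obtains and renews the Let's Encrypt certificate for your-domain.com automatically; since Caddy terminates TLS, the Ebla server itself can serve plain HTTP and needs no tls_cert or tls_key entries.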