Python SDK
Storage
Persistent storage that survives container restarts. Your data lives on, backed by Cloudflare R2.
Quick Start
Enable storage with a single parameter:
from basilica import BasilicaClient
client = BasilicaClient()
deployment = client.deploy(
name="app",
source="app.py",
storage=True, # Mounts at /data
)
Then use /data in your application:
from pathlib import Path
# Write data
Path('/data/counter.txt').write_text('42')
# Read data
count = Path('/data/counter.txt').read_text()
Storage Options
Default Mount Path
deployment = client.deploy(
name="app",
source="app.py",
storage=True, # Mounts at /data (default)
)
Custom Mount Path
deployment = client.deploy(
name="app",
source="app.py",
storage="/cache", # Mounts at /cache
)
Named Volumes (Decorator Pattern)
For more control, use named volumes with the decorator:
import basilica
# Create or get a named volume
cache = basilica.Volume.from_name("model-cache", create_if_missing=True)
@basilica.deployment(
name="app",
volumes={"/models": cache}
)
def serve():
from pathlib import Path
models_dir = Path("/models")
# ...
Volume API
Creating Volumes
import basilica
# Get existing volume or create if missing
volume = basilica.Volume.from_name(
name="my-volume", # Volume name (bucket identifier)
create_if_missing=True # Create if doesn't exist
)
Multiple Volumes
Mount multiple volumes to different paths:
import basilica
data = basilica.Volume.from_name("app-data", create_if_missing=True)
cache = basilica.Volume.from_name("model-cache", create_if_missing=True)
@basilica.deployment(
name="ml-app",
volumes={
"/data": data,
"/cache": cache,
}
)
def serve():
# /data for user data
# /cache for model weights
pass
Common Patterns
Persistent Counter
from basilica import BasilicaClient
from pathlib import Path
client = BasilicaClient()
deployment = client.deploy(
name="counter",
source="""
from pathlib import Path
from http.server import HTTPServer, BaseHTTPRequestHandler
COUNTER_FILE = Path('/data/count.txt')
class Handler(BaseHTTPRequestHandler):
def do_GET(self):
# Read current count
count = int(COUNTER_FILE.read_text()) if COUNTER_FILE.exists() else 0
count += 1
# Save new count
COUNTER_FILE.write_text(str(count))
self.send_response(200)
self.end_headers()
self.wfile.write(f'Count: {count}'.encode())
HTTPServer(('', 8000), Handler).serve_forever()
""",
port=8000,
storage=True,
)
Model Caching
Cache downloaded models to avoid re-downloading:
import basilica
cache = basilica.Volume.from_name("huggingface-cache", create_if_missing=True)
@basilica.deployment(
name="llm",
image="pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime",
gpu_count=1,
memory="16Gi",
volumes={"/root/.cache/huggingface": cache}
)
def serve():
from transformers import AutoModelForCausalLM, AutoTokenizer
# Model downloads once, then cached in volume
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b")
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b")
# Serve model...
SQLite Database
Use SQLite for lightweight persistence:
from basilica import BasilicaClient
client = BasilicaClient()
deployment = client.deploy(
name="app",
source="""
import sqlite3
from pathlib import Path
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
DB_PATH = '/data/app.db'
def init_db():
conn = sqlite3.connect(DB_PATH)
conn.execute('CREATE TABLE IF NOT EXISTS visits (id INTEGER PRIMARY KEY, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP)')
conn.commit()
return conn
class Handler(BaseHTTPRequestHandler):
def do_GET(self):
conn = init_db()
conn.execute('INSERT INTO visits DEFAULT VALUES')
conn.commit()
count = conn.execute('SELECT COUNT(*) FROM visits').fetchone()[0]
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps({'visits': count}).encode())
HTTPServer(('', 8000), Handler).serve_forever()
""",
port=8000,
storage=True,
)
File Upload Service
import basilica
uploads = basilica.Volume.from_name("user-uploads", create_if_missing=True)
@basilica.deployment(
name="upload-service",
port=8000,
pip_packages=["fastapi", "uvicorn", "python-multipart"],
volumes={"/uploads": uploads}
)
def serve():
from fastapi import FastAPI, UploadFile
from pathlib import Path
import uvicorn
app = FastAPI()
UPLOAD_DIR = Path("/uploads")
@app.post("/upload")
async def upload(file: UploadFile):
path = UPLOAD_DIR / file.filename
with open(path, "wb") as f:
f.write(await file.read())
return {"path": str(path)}
@app.get("/files")
def list_files():
return {"files": [f.name for f in UPLOAD_DIR.iterdir()]}
uvicorn.run(app, host="0.0.0.0", port=8000)
deployment = serve()
Technical Details
Storage Backend
| Property | Value |
|---|---|
| Backend | Cloudflare R2 |
| Mount Type | FUSE filesystem |
| Sync Interval | 1000ms |
| Cache Size | 1024MB (configurable) |
Readiness Detection
The storage system creates a marker file when ready:
import time
from pathlib import Path
# Wait for storage to be ready
MARKER = Path('/data/.fuse_ready')
while not MARKER.exists():
time.sleep(0.1)
# Storage is now ready
The SDK handles readiness automatically. You only need manual checks for custom images or scripts.
Performance Considerations
- Write latency: Writes are cached locally and synced to R2 asynchronously
- Read latency: First read fetches from R2, subsequent reads are cached
- Sync interval: Data syncs every 1 second by default
- Large files: Consider streaming for files larger than cache size
Volume Naming
Volume names must be:
- Lowercase letters, numbers, and hyphens only
- 3-63 characters long
- Unique within your account
# Good names
basilica.Volume.from_name("my-data", create_if_missing=True)
basilica.Volume.from_name("cache-v2", create_if_missing=True)
basilica.Volume.from_name("prod-models", create_if_missing=True)
# Invalid names (will fail)
basilica.Volume.from_name("My Data") # No spaces, uppercase
basilica.Volume.from_name("cache_v2") # No underscores
basilica.Volume.from_name("a") # Too short
Next Steps
- Deploy GPU workloads with persistent model storage
- Browse storage examples