Commit 388cf70

bryfry <bryon@fryer.io>
2026-01-26 20:27:23
ioc
1 parent 745623c
internal/cli/fetch.go
@@ -0,0 +1,90 @@
+package cli
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/crash/upvs/internal/config"
+	"github.com/crash/upvs/internal/output"
+	"github.com/crash/upvs/internal/pipeline"
+	"github.com/crash/upvs/internal/protect"
+	"github.com/spf13/cobra"
+)
+
// fetchCmd implements the "fetch" subcommand: it downloads the clips listed
// in clip_index.json (produced by "scan") using concurrent workers.
var fetchCmd = &cobra.Command{
	Use:   "fetch",
	Short: "Download video clips",
	Long:  "Download clips listed in clip_index.json with concurrent workers and retry support.",
	RunE:  runFetch,
}
+
// init registers fetch-specific flags; connection/output flags are inherited
// from rootCmd's persistent flag set.
func init() {
	fetchCmd.Flags().StringVar(&dateStr, "date", "", "Target date YYYY-MM-DD (required)")
	fetchCmd.Flags().IntVar(&cfg.Workers, "workers", config.DefaultWorkers, "Number of concurrent download workers")
	fetchCmd.Flags().IntVar(&cfg.Retries, "retries", config.DefaultRetries, "Number of retry attempts per clip")
	// MarkFlagRequired only errors for an unknown flag name; safe to ignore here.
	fetchCmd.MarkFlagRequired("date")
}
+
+func runFetch(cmd *cobra.Command, args []string) error {
+	// Parse date
+	date, err := config.ParseDate(dateStr)
+	if err != nil {
+		return err
+	}
+	cfg.Date = date
+
+	// Validate required fields
+	if cfg.Host == "" {
+		return config.ErrMissingHost
+	}
+	if cfg.Username == "" || cfg.Password == "" {
+		return config.ErrMissingCredentials
+	}
+	if cfg.Camera == "" {
+		return config.ErrMissingCamera
+	}
+	if cfg.OutDir == "" {
+		return config.ErrMissingOutDir
+	}
+
+	// Create layout
+	layout := output.NewLayout(cfg.OutDir, cfg.Camera, cfg.DateString())
+
+	// Create client
+	var opts []protect.ClientOption
+	if cfg.TLSInsecure {
+		opts = append(opts, protect.WithTLSInsecure())
+	}
+	if cfg.DirectAPI {
+		opts = append(opts, protect.WithDirectAPI())
+	}
+	client := protect.NewClient(cfg.Host, opts...)
+
+	// Login
+	ctx := context.Background()
+	err = client.Login(ctx, cfg.Username, cfg.Password)
+	if err != nil {
+		return fmt.Errorf("login: %w", err)
+	}
+
+	// Run fetch
+	result, err := pipeline.Fetch(ctx, client, layout, pipeline.FetchConfig{
+		Workers: cfg.Workers,
+		Retries: cfg.Retries,
+	})
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf("Fetch complete: %d downloaded, %d skipped, %d failed\n",
+		result.Downloaded, result.Skipped, result.Failed)
+
+	if result.Failed > 0 {
+		fmt.Println("Failed clips:")
+		for _, e := range result.Errors {
+			fmt.Printf("  - %v\n", e)
+		}
+	}
+
+	return nil
+}
internal/cli/render.go
@@ -0,0 +1,69 @@
+package cli
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/crash/upvs/internal/config"
+	"github.com/crash/upvs/internal/output"
+	"github.com/crash/upvs/internal/pipeline"
+	"github.com/spf13/cobra"
+)
+
// renderCmd implements the "render" subcommand: it assembles the clips that
// "fetch" downloaded into a single timelapse via FFmpeg.
var renderCmd = &cobra.Command{
	Use:   "render",
	Short: "Generate timelapse video",
	Long:  "Render downloaded clips into a timelapse video using FFmpeg.",
	RunE:  runRender,
}
+
// init registers render-specific encoding flags.
func init() {
	renderCmd.Flags().StringVar(&dateStr, "date", "", "Target date YYYY-MM-DD (required)")
	renderCmd.Flags().IntVar(&cfg.TargetSecs, "target", config.DefaultTargetSecs, "Target output duration in seconds")
	renderCmd.Flags().IntVar(&cfg.FPS, "fps", config.DefaultFPS, "Output frame rate")
	renderCmd.Flags().IntVar(&cfg.CRF, "crf", config.DefaultCRF, "FFmpeg CRF quality (0-51, lower=better)")
	renderCmd.Flags().StringVar(&cfg.Preset, "preset", config.DefaultPreset, "FFmpeg preset (ultrafast to veryslow)")
	// MarkFlagRequired only errors for an unknown flag name; safe to ignore here.
	renderCmd.MarkFlagRequired("date")
}
+
+func runRender(cmd *cobra.Command, args []string) error {
+	// Parse date
+	date, err := config.ParseDate(dateStr)
+	if err != nil {
+		return err
+	}
+	cfg.Date = date
+
+	// Validate required fields
+	if cfg.Camera == "" {
+		return config.ErrMissingCamera
+	}
+	if cfg.OutDir == "" {
+		return config.ErrMissingOutDir
+	}
+
+	// Create layout
+	layout := output.NewLayout(cfg.OutDir, cfg.Camera, cfg.DateString())
+
+	// Run render
+	ctx := context.Background()
+	result, err := pipeline.Render(ctx, layout, pipeline.RenderConfig{
+		TargetSecs:   cfg.TargetSecs,
+		FPS:          cfg.FPS,
+		CRF:          cfg.CRF,
+		Preset:       cfg.Preset,
+		MinSpeedMult: cfg.MinSpeedMult,
+		MaxSpeedMult: cfg.MaxSpeedMult,
+	})
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf("Render complete!\n")
+	fmt.Printf("  Output: %s\n", result.OutputPath)
+	fmt.Printf("  Speed factor: %.2fx\n", result.SpeedFactor)
+	fmt.Printf("  Duration: %.1f seconds\n", result.OutputDuration)
+	fmt.Printf("  Clips: %d/%d\n", result.Metadata.ValidClips, result.Metadata.TotalClips)
+
+	return nil
+}
internal/cli/root.go
@@ -0,0 +1,95 @@
+// Package cli implements the command-line interface.
+package cli
+
+import (
+	"fmt"
+	"log/slog"
+	"os"
+
+	"github.com/crash/upvs/internal/config"
+	"github.com/spf13/cobra"
+)
+
// cfg is the process-wide configuration, populated by flags and environment
// variables before any subcommand's RunE executes.
var cfg = config.New()

// passwordFile holds the --password-file flag value; its contents are loaded
// into cfg.Password by rootCmd's PersistentPreRunE.
var passwordFile string
+
// rootCmd is the base command. Its PersistentPreRunE runs before every
// subcommand, handling password-file loading and logger setup.
var rootCmd = &cobra.Command{
	Use:   "upvs",
	Short: "UniFi Protect Video Summarizer",
	Long:  "Create ~10-minute timelapses from UniFi Protect camera recordings for a single day.",
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
		// Load password from file if specified. An explicit --password flag
		// (or UPVS_PASSWORD) takes precedence over --password-file.
		if passwordFile != "" && cfg.Password == "" {
			err := cfg.LoadPasswordFromFile(passwordFile)
			if err != nil {
				return err
			}
		}

		// Set up logging: debug when --verbose, info otherwise. Logs go to
		// stderr so stdout stays clean for command output.
		level := slog.LevelInfo
		if cfg.Verbose {
			level = slog.LevelDebug
		}
		handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: level})
		slog.SetDefault(slog.New(handler))

		return nil
	},
}
+
// init wires up the persistent (global) flags, environment-variable defaults,
// and all subcommands.
func init() {
	// Silence usage and errors on failure - we handle error printing in Execute()
	rootCmd.SilenceUsage = true
	rootCmd.SilenceErrors = true

	// Connection flags
	rootCmd.PersistentFlags().StringVar(&cfg.Host, "host", "", "UniFi Protect URL (env: UPVS_HOST)")
	rootCmd.PersistentFlags().StringVar(&cfg.Username, "username", "", "Username (env: UPVS_USERNAME)")
	rootCmd.PersistentFlags().StringVar(&cfg.Password, "password", "", "Password (env: UPVS_PASSWORD)")
	rootCmd.PersistentFlags().StringVar(&passwordFile, "password-file", "", "Path to file containing password")
	rootCmd.PersistentFlags().BoolVar(&cfg.TLSInsecure, "tls-insecure", false, "Skip TLS verification")
	rootCmd.PersistentFlags().BoolVar(&cfg.DirectAPI, "direct-api", false, "Use /api path (for direct NVR connection, not UniFi OS)")

	// Target selection flags
	rootCmd.PersistentFlags().StringVar(&cfg.Camera, "camera", "", "Camera ID or name")

	// Output flags
	rootCmd.PersistentFlags().StringVar(&cfg.OutDir, "out", "", "Output directory")
	rootCmd.PersistentFlags().BoolVarP(&cfg.Verbose, "verbose", "v", false, "Enable debug logging")

	// Bind environment variables as defaults. Flags still win because bindEnv
	// only fills targets that are currently empty.
	bindEnv("UPVS_HOST", &cfg.Host)
	bindEnv("UPVS_USERNAME", &cfg.Username)
	bindEnv("UPVS_PASSWORD", &cfg.Password)

	// Add subcommands
	rootCmd.AddCommand(scanCmd)
	rootCmd.AddCommand(fetchCmd)
	rootCmd.AddCommand(renderCmd)
	rootCmd.AddCommand(runCmd)
}
+
// bindEnv sets a flag default from an environment variable if the flag is
// empty. A target already set (e.g. via a command-line flag) is never
// overwritten, and an unset/empty environment variable is ignored.
func bindEnv(envVar string, target *string) {
	if *target != "" {
		return
	}
	if val := os.Getenv(envVar); val != "" {
		*target = val
	}
}
+
+// Execute runs the root command.
+func Execute() {
+	err := rootCmd.Execute()
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+
// GetConfig returns the global configuration shared by all subcommands.
// Callers receive the live pointer, not a copy.
func GetConfig() *config.Config {
	return cfg
}
internal/cli/run.go
@@ -0,0 +1,116 @@
+package cli
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/crash/upvs/internal/config"
+	"github.com/crash/upvs/internal/output"
+	"github.com/crash/upvs/internal/pipeline"
+	"github.com/crash/upvs/internal/protect"
+	"github.com/spf13/cobra"
+)
+
// runCmd implements the "run" subcommand, chaining scan, fetch, and render
// in a single invocation.
var runCmd = &cobra.Command{
	Use:   "run",
	Short: "Run full pipeline (scan + fetch + render)",
	Long:  "Execute all phases: enumerate events, download clips, and render timelapse.",
	RunE:  runAll,
}
+
// init registers the union of fetch and render flags, since "run" performs
// both phases.
func init() {
	runCmd.Flags().StringVar(&dateStr, "date", "", "Target date YYYY-MM-DD (required)")
	runCmd.Flags().IntVar(&cfg.Workers, "workers", config.DefaultWorkers, "Number of concurrent download workers")
	runCmd.Flags().IntVar(&cfg.Retries, "retries", config.DefaultRetries, "Number of retry attempts per clip")
	runCmd.Flags().IntVar(&cfg.TargetSecs, "target", config.DefaultTargetSecs, "Target output duration in seconds")
	runCmd.Flags().IntVar(&cfg.FPS, "fps", config.DefaultFPS, "Output frame rate")
	runCmd.Flags().IntVar(&cfg.CRF, "crf", config.DefaultCRF, "FFmpeg CRF quality (0-51, lower=better)")
	runCmd.Flags().StringVar(&cfg.Preset, "preset", config.DefaultPreset, "FFmpeg preset (ultrafast to veryslow)")
	// MarkFlagRequired only errors for an unknown flag name; safe to ignore here.
	runCmd.MarkFlagRequired("date")
}
+
// runAll is the RunE handler for "run". It executes the full pipeline in
// strict order — scan, fetch, render — sharing one login session and one
// output layout across all phases. Any phase failure aborts the run.
func runAll(cmd *cobra.Command, args []string) error {
	// Parse date
	date, err := config.ParseDate(dateStr)
	if err != nil {
		return err
	}
	cfg.Date = date

	// Validate all required fields (unlike the individual subcommands, "run"
	// needs everything, so the full Validate is used).
	err = cfg.Validate()
	if err != nil {
		return err
	}

	// Create layout and ensure directories
	layout := output.NewLayout(cfg.OutDir, cfg.Camera, cfg.DateString())
	err = layout.EnsureDirs()
	if err != nil {
		return err
	}

	// Create client
	var opts []protect.ClientOption
	if cfg.TLSInsecure {
		opts = append(opts, protect.WithTLSInsecure())
	}
	if cfg.DirectAPI {
		opts = append(opts, protect.WithDirectAPI())
	}
	client := protect.NewClient(cfg.Host, opts...)

	// Login once; the same session is reused by the scan and fetch phases.
	ctx := context.Background()
	err = client.Login(ctx, cfg.Username, cfg.Password)
	if err != nil {
		return fmt.Errorf("login: %w", err)
	}

	// Phase A: Scan — enumerate the day's events and write clip_index.json.
	fmt.Println("=== Phase 1: Scanning events ===")
	scanResult, err := pipeline.Scan(ctx, client, layout, cfg.Camera, cfg.Date)
	if err != nil {
		return fmt.Errorf("scan phase: %w", err)
	}
	fmt.Printf("Found %d events for camera %s\n\n", scanResult.EventCount, scanResult.Camera.Name)

	// Nothing to download or render for an empty day; exit successfully.
	if scanResult.EventCount == 0 {
		fmt.Println("No events found for this day. Nothing to do.")
		return nil
	}

	// Phase B: Fetch — download the indexed clips concurrently.
	fmt.Println("=== Phase 2: Downloading clips ===")
	fetchResult, err := pipeline.Fetch(ctx, client, layout, pipeline.FetchConfig{
		Workers: cfg.Workers,
		Retries: cfg.Retries,
	})
	if err != nil {
		return fmt.Errorf("fetch phase: %w", err)
	}
	fmt.Printf("Downloaded %d, skipped %d, failed %d\n\n",
		fetchResult.Downloaded, fetchResult.Skipped, fetchResult.Failed)

	// Phase C-E: Render (includes manifest and speed calculation)
	fmt.Println("=== Phase 3: Rendering timelapse ===")
	renderResult, err := pipeline.Render(ctx, layout, pipeline.RenderConfig{
		TargetSecs:   cfg.TargetSecs,
		FPS:          cfg.FPS,
		CRF:          cfg.CRF,
		Preset:       cfg.Preset,
		MinSpeedMult: cfg.MinSpeedMult,
		MaxSpeedMult: cfg.MaxSpeedMult,
	})
	if err != nil {
		return fmt.Errorf("render phase: %w", err)
	}

	fmt.Println()
	fmt.Println("=== Complete ===")
	fmt.Printf("Output: %s\n", renderResult.OutputPath)
	fmt.Printf("Duration: %.1f seconds (%.1fx speed)\n",
		renderResult.OutputDuration, renderResult.SpeedFactor)

	return nil
}
internal/cli/scan.go
@@ -0,0 +1,85 @@
+package cli
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/crash/upvs/internal/config"
+	"github.com/crash/upvs/internal/output"
+	"github.com/crash/upvs/internal/pipeline"
+	"github.com/crash/upvs/internal/protect"
+	"github.com/spf13/cobra"
+)
+
+var dateStr string
+
// scanCmd implements the "scan" subcommand, the first pipeline phase.
var scanCmd = &cobra.Command{
	Use:   "scan",
	Short: "Enumerate events and create clip index",
	Long:  "Scan a day's events from UniFi Protect and write clip_index.json.",
	RunE:  runScan,
}
+
// init registers the scan-specific --date flag.
func init() {
	scanCmd.Flags().StringVar(&dateStr, "date", "", "Target date YYYY-MM-DD (required)")
	// MarkFlagRequired only errors for an unknown flag name; safe to ignore here.
	scanCmd.MarkFlagRequired("date")
}
+
+func runScan(cmd *cobra.Command, args []string) error {
+	// Parse date
+	date, err := config.ParseDate(dateStr)
+	if err != nil {
+		return err
+	}
+	cfg.Date = date
+
+	// Validate required fields for scan
+	if cfg.Host == "" {
+		return config.ErrMissingHost
+	}
+	if cfg.Username == "" || cfg.Password == "" {
+		return config.ErrMissingCredentials
+	}
+	if cfg.Camera == "" {
+		return config.ErrMissingCamera
+	}
+	if cfg.OutDir == "" {
+		return config.ErrMissingOutDir
+	}
+
+	// Create layout and ensure directories
+	layout := output.NewLayout(cfg.OutDir, cfg.Camera, cfg.DateString())
+	err = layout.EnsureDirs()
+	if err != nil {
+		return err
+	}
+
+	// Create client
+	var opts []protect.ClientOption
+	if cfg.TLSInsecure {
+		opts = append(opts, protect.WithTLSInsecure())
+	}
+	if cfg.DirectAPI {
+		opts = append(opts, protect.WithDirectAPI())
+	}
+	client := protect.NewClient(cfg.Host, opts...)
+
+	// Login
+	ctx := context.Background()
+	err = client.Login(ctx, cfg.Username, cfg.Password)
+	if err != nil {
+		return fmt.Errorf("login: %w", err)
+	}
+
+	// Run scan
+	result, err := pipeline.Scan(ctx, client, layout, cfg.Camera, cfg.Date)
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf("Scan complete: %d events for camera %s (%s)\n",
+		result.EventCount, result.Camera.Name, result.Camera.ID)
+	fmt.Printf("Clip index: %s\n", layout.ClipIndexPath())
+
+	return nil
+}
internal/config/config.go
@@ -0,0 +1,129 @@
+// Package config defines the configuration for the upvs CLI.
+package config
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+	"time"
+)
+
// Default values for configuration, applied by New and used as flag defaults.
const (
	DefaultWorkers      = 4   // concurrent download workers for fetch
	DefaultRetries      = 3   // retry attempts per clip
	DefaultTargetSecs   = 600 // 10 minutes of output video
	DefaultFPS          = 30
	DefaultCRF          = 23
	DefaultPreset       = "medium"
	DefaultMinSpeedMult = 1.0
	DefaultMaxSpeedMult = 2000.0
)
+
// Config holds all configuration for a pipeline run. Connection and target
// fields are populated from flags and environment variables; the remaining
// fields carry defaults from New.
type Config struct {
	// Connection settings
	Host        string
	Username    string
	Password    string
	TLSInsecure bool
	DirectAPI   bool // Use /api instead of /proxy/protect/api

	// Target selection
	Camera string
	Date   time.Time

	// Output settings
	OutDir  string
	Verbose bool

	// Fetch settings
	Workers int
	Retries int

	// Render settings
	TargetSecs   int
	FPS          int
	CRF          int
	Preset       string
	MinSpeedMult float64
	MaxSpeedMult float64
}
+
// New creates a Config with default values. Connection and target fields
// (Host, Username, Password, Camera, Date, OutDir) start at their zero
// values and must be filled in before Validate passes.
func New() *Config {
	return &Config{
		Workers:      DefaultWorkers,
		Retries:      DefaultRetries,
		TargetSecs:   DefaultTargetSecs,
		FPS:          DefaultFPS,
		CRF:          DefaultCRF,
		Preset:       DefaultPreset,
		MinSpeedMult: DefaultMinSpeedMult,
		MaxSpeedMult: DefaultMaxSpeedMult,
	}
}
+
// Sentinel errors for configuration validation. Callers match them with
// errors.Is; ErrInvalidHost and ErrInvalidDate may be returned wrapped.
var (
	ErrMissingHost        = errors.New("host is required")
	ErrMissingCredentials = errors.New("username and password are required")
	ErrMissingCamera      = errors.New("camera is required")
	ErrMissingDate        = errors.New("date is required")
	ErrMissingOutDir      = errors.New("out directory is required")
	ErrInvalidHost        = errors.New("invalid host URL")
	ErrInvalidDate        = errors.New("invalid date format (expected YYYY-MM-DD)")
)
+
+// Validate checks that all required fields are set and valid.
+func (c *Config) Validate() error {
+	if c.Host == "" {
+		return ErrMissingHost
+	}
+	_, err := url.Parse(c.Host)
+	if err != nil {
+		return fmt.Errorf("%w: %v", ErrInvalidHost, err)
+	}
+	if c.Username == "" || c.Password == "" {
+		return ErrMissingCredentials
+	}
+	if c.Camera == "" {
+		return ErrMissingCamera
+	}
+	if c.Date.IsZero() {
+		return ErrMissingDate
+	}
+	if c.OutDir == "" {
+		return ErrMissingOutDir
+	}
+	return nil
+}
+
// LoadPasswordFromFile reads the password from the given file into
// c.Password, trimming surrounding whitespace (including the trailing
// newline most editors add). It does NOT check whether Password is already
// set — the caller guards that (see rootCmd's PersistentPreRunE). An empty
// path is a no-op.
func (c *Config) LoadPasswordFromFile(path string) error {
	if path == "" {
		return nil
	}
	data, err := os.ReadFile(path)
	if err != nil {
		return fmt.Errorf("reading password-file: %w", err)
	}
	c.Password = strings.TrimSpace(string(data))
	return nil
}
+
+// ParseDate parses a date string in YYYY-MM-DD format.
+func ParseDate(s string) (time.Time, error) {
+	t, err := time.Parse("2006-01-02", s)
+	if err != nil {
+		return time.Time{}, ErrInvalidDate
+	}
+	return t, nil
+}
+
// DateString returns c.Date formatted as YYYY-MM-DD — the same format
// ParseDate accepts and the output layout uses in directory names.
func (c *Config) DateString() string {
	return c.Date.Format("2006-01-02")
}
internal/config/config_test.go
@@ -0,0 +1,132 @@
+package config
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+)
+
// TestValidate verifies that a fully-populated Config passes Validate.
func TestValidate(t *testing.T) {
	validCfg := &Config{
		Host:     "https://protect.local",
		Username: "admin",
		Password: "secret",
		Camera:   "front-door",
		Date:     time.Now(),
		OutDir:   "/tmp/out",
	}

	err := validCfg.Validate()
	if err != nil {
		t.Errorf("expected valid config, got error: %v", err)
	}
}
+
// TestValidate_MissingFields clears one required field at a time from an
// otherwise-valid Config and checks the matching sentinel error is returned.
func TestValidate_MissingFields(t *testing.T) {
	tests := []struct {
		name     string
		modify   func(*Config)
		expected error
	}{
		{
			name:     "missing host",
			modify:   func(c *Config) { c.Host = "" },
			expected: ErrMissingHost,
		},
		{
			name:     "missing username",
			modify:   func(c *Config) { c.Username = "" },
			expected: ErrMissingCredentials,
		},
		{
			name:     "missing password",
			modify:   func(c *Config) { c.Password = "" },
			expected: ErrMissingCredentials,
		},
		{
			name:     "missing camera",
			modify:   func(c *Config) { c.Camera = "" },
			expected: ErrMissingCamera,
		},
		{
			name:     "missing date",
			modify:   func(c *Config) { c.Date = time.Time{} },
			expected: ErrMissingDate,
		},
		{
			name:     "missing out dir",
			modify:   func(c *Config) { c.OutDir = "" },
			expected: ErrMissingOutDir,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Fresh valid config per subtest so mutations don't leak.
			cfg := &Config{
				Host:     "https://protect.local",
				Username: "admin",
				Password: "secret",
				Camera:   "front-door",
				Date:     time.Now(),
				OutDir:   "/tmp/out",
			}
			tc.modify(cfg)

			// errors.Is also accepts wrapped sentinels.
			err := cfg.Validate()
			if !errors.Is(err, tc.expected) {
				t.Errorf("expected %v, got %v", tc.expected, err)
			}
		})
	}
}
+
// TestParseDate covers valid YYYY-MM-DD inputs and a few malformed formats
// (wrong separators, wrong field order, free text).
func TestParseDate(t *testing.T) {
	tests := []struct {
		input    string
		valid    bool
		expected string
	}{
		{"2024-01-15", true, "2024-01-15"},
		{"2024-12-31", true, "2024-12-31"},
		{"invalid", false, ""},
		{"01-15-2024", false, ""},
		{"2024/01/15", false, ""},
	}

	for _, tc := range tests {
		result, err := ParseDate(tc.input)
		if tc.valid {
			if err != nil {
				t.Errorf("ParseDate(%q) unexpected error: %v", tc.input, err)
			}
			// Round-trip through the same layout to compare dates.
			if result.Format("2006-01-02") != tc.expected {
				t.Errorf("ParseDate(%q) = %v, want %v", tc.input, result.Format("2006-01-02"), tc.expected)
			}
		} else {
			if err == nil {
				t.Errorf("ParseDate(%q) expected error, got nil", tc.input)
			}
		}
	}
}
+
// TestLoadPasswordFromFile checks that surrounding whitespace and the
// trailing newline are stripped from the file contents.
func TestLoadPasswordFromFile(t *testing.T) {
	// Create temp file with password
	dir := t.TempDir()
	passFile := filepath.Join(dir, "password")
	err := os.WriteFile(passFile, []byte("  my-secret-pass  \n"), 0600)
	if err != nil {
		t.Fatal(err)
	}

	cfg := New()
	err = cfg.LoadPasswordFromFile(passFile)
	if err != nil {
		t.Errorf("LoadPasswordFromFile error: %v", err)
	}
	if cfg.Password != "my-secret-pass" {
		t.Errorf("expected 'my-secret-pass', got %q", cfg.Password)
	}
}
internal/ffmpeg/concat.go
@@ -0,0 +1,35 @@
+// Package ffmpeg provides FFmpeg integration for timelapse generation.
+package ffmpeg
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// GenerateConcatFile creates an FFmpeg concat demuxer file.
+// Each path in the list is written as "file '<path>'" on its own line.
+// Paths are converted to absolute paths since ffmpeg's concat demuxer
+// resolves relative paths relative to the concat file's directory.
+func GenerateConcatFile(outputPath string, clipPaths []string) error {
+	var b strings.Builder
+
+	for _, path := range clipPaths {
+		// Convert to absolute path for ffmpeg concat demuxer
+		absPath, err := filepath.Abs(path)
+		if err != nil {
+			return fmt.Errorf("getting absolute path for %s: %w", path, err)
+		}
+		// Escape single quotes in path by replacing ' with '\''
+		escaped := strings.ReplaceAll(absPath, "'", "'\\''")
+		fmt.Fprintf(&b, "file '%s'\n", escaped)
+	}
+
+	err := os.WriteFile(outputPath, []byte(b.String()), 0644)
+	if err != nil {
+		return fmt.Errorf("writing concat file: %w", err)
+	}
+
+	return nil
+}
internal/ffmpeg/runner.go
@@ -0,0 +1,90 @@
+package ffmpeg
+
import (
	"context"
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)
+
// RenderConfig holds FFmpeg rendering parameters for a single render pass.
// (Field alignment fixed to be gofmt-clean.)
type RenderConfig struct {
	ConcatFile  string  // path to the concat demuxer input list
	OutputPath  string  // destination video file (overwritten if present)
	SpeedFactor float64 // setpts divisor; values > 1 speed the video up
	FPS         int     // output frame rate
	CRF         int     // x264 constant rate factor (0-51, lower = better)
	Preset      string  // x264 preset name (ultrafast..veryslow)
}
+
+// Render executes FFmpeg to create the timelapse.
+func Render(ctx context.Context, cfg RenderConfig) error {
+	// Build setpts filter for speed adjustment
+	// setpts=PTS/N makes video N times faster
+	setptsFilter := fmt.Sprintf("setpts=PTS/%s", strconv.FormatFloat(cfg.SpeedFactor, 'f', 6, 64))
+
+	args := []string{
+		"-f", "concat",
+		"-safe", "0",
+		"-i", cfg.ConcatFile,
+		"-vf", setptsFilter,
+		"-r", strconv.Itoa(cfg.FPS),
+		"-c:v", "libx264",
+		"-crf", strconv.Itoa(cfg.CRF),
+		"-preset", cfg.Preset,
+		"-pix_fmt", "yuv420p",
+		"-an", // No audio
+		"-y",  // Overwrite output
+		cfg.OutputPath,
+	}
+
+	cmd := exec.CommandContext(ctx, "ffmpeg", args...)
+
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("ffmpeg failed: %w\noutput: %s", err, string(output))
+	}
+
+	return nil
+}
+
// Probe checks if a video file is valid using ffprobe. It requests only the
// container duration; any parse problem makes ffprobe exit non-zero, which
// is surfaced as an error that includes ffprobe's own output.
func Probe(ctx context.Context, path string) error {
	cmd := exec.CommandContext(ctx, "ffprobe",
		"-v", "error",
		"-show_entries", "format=duration",
		"-of", "default=noprint_wrappers=1:nokey=1",
		path,
	)

	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("ffprobe failed: %w\noutput: %s", err, string(out))
	}
	return nil
}
+
// GetDuration returns the duration of a video file in seconds, as reported
// by ffprobe's container metadata.
func GetDuration(ctx context.Context, path string) (float64, error) {
	args := []string{
		"-v", "error",
		"-show_entries", "format=duration",
		"-of", "default=noprint_wrappers=1:nokey=1",
		path,
	}

	cmd := exec.CommandContext(ctx, "ffprobe", args...)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return 0, fmt.Errorf("ffprobe failed: %w", err)
	}

	// Trim all surrounding whitespace rather than slicing off one byte:
	// output[:len(output)-1] panicked on empty output and left a stray
	// '\r' behind with CRLF line endings.
	duration, err := strconv.ParseFloat(strings.TrimSpace(string(output)), 64)
	if err != nil {
		return 0, fmt.Errorf("parsing duration: %w", err)
	}

	return duration, nil
}
internal/manifest/clipindex.go
@@ -0,0 +1,101 @@
+package manifest
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"sort"
+	"time"
+)
+
+// NewClipIndex creates a new ClipIndex.
+func NewClipIndex(cameraID, cameraName, date string) *ClipIndex {
+	now := time.Now()
+	return &ClipIndex{
+		CameraID:   cameraID,
+		CameraName: cameraName,
+		Date:       date,
+		CreatedAt:  now,
+		UpdatedAt:  now,
+		Clips:      make([]*ClipEntry, 0),
+	}
+}
+
// AddClip adds a clip entry to the index and bumps UpdatedAt. Ordering is
// not maintained here; call Sort (or Write, which sorts) afterwards.
func (ci *ClipIndex) AddClip(entry *ClipEntry) {
	ci.Clips = append(ci.Clips, entry)
	ci.UpdatedAt = time.Now()
}
+
+// Sort orders clips by (start_ms, end_ms, event_id) for deterministic ordering.
+func (ci *ClipIndex) Sort() {
+	sort.Slice(ci.Clips, func(i, j int) bool {
+		a, b := ci.Clips[i], ci.Clips[j]
+		if a.StartMs != b.StartMs {
+			return a.StartMs < b.StartMs
+		}
+		if a.EndMs != b.EndMs {
+			return a.EndMs < b.EndMs
+		}
+		return a.EventID < b.EventID
+	})
+}
+
+// FindByEventID returns the clip entry with the given event ID, or nil.
+func (ci *ClipIndex) FindByEventID(eventID string) *ClipEntry {
+	for _, c := range ci.Clips {
+		if c.EventID == eventID {
+			return c
+		}
+	}
+	return nil
+}
+
+// Write saves the clip index to a JSON file.
+func (ci *ClipIndex) Write(path string) error {
+	ci.UpdatedAt = time.Now()
+	ci.Sort()
+
+	data, err := json.MarshalIndent(ci, "", "  ")
+	if err != nil {
+		return fmt.Errorf("marshaling clip index: %w", err)
+	}
+
+	err = os.WriteFile(path, data, 0644)
+	if err != nil {
+		return fmt.Errorf("writing clip index: %w", err)
+	}
+
+	return nil
+}
+
+// ReadClipIndex loads a clip index from a JSON file.
+func ReadClipIndex(path string) (*ClipIndex, error) {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return nil, fmt.Errorf("reading clip index: %w", err)
+	}
+
+	var ci ClipIndex
+	err = json.Unmarshal(data, &ci)
+	if err != nil {
+		return nil, fmt.Errorf("parsing clip index: %w", err)
+	}
+
+	return &ci, nil
+}
+
+// WriteDayMetadata saves day metadata to a JSON file.
+func WriteDayMetadata(path string, meta *DayMetadata) error {
+	data, err := json.MarshalIndent(meta, "", "  ")
+	if err != nil {
+		return fmt.Errorf("marshaling day metadata: %w", err)
+	}
+
+	err = os.WriteFile(path, data, 0644)
+	if err != nil {
+		return fmt.Errorf("writing day metadata: %w", err)
+	}
+
+	return nil
+}
internal/manifest/types.go
@@ -0,0 +1,94 @@
+// Package manifest defines types for tracking clip download state.
+package manifest
+
+import "time"
+
// ClipStatus represents the download state of a clip. The string values are
// serialized as-is into clip_index.json.
type ClipStatus string

const (
	StatusPending    ClipStatus = "pending"
	StatusInProgress ClipStatus = "in_progress"
	StatusComplete   ClipStatus = "complete"
	StatusFailed     ClipStatus = "failed"
	StatusSkipped    ClipStatus = "skipped"
)
+
// ClipEntry represents a single video clip from an event. Times are Unix
// epoch milliseconds; FilePath/FileSize/DownloadedAt are set once the clip
// has been fetched, and Error records the last failure message if any.
type ClipEntry struct {
	EventID     string     `json:"event_id"`
	StartMs     int64      `json:"start_ms"`
	EndMs       int64      `json:"end_ms"`
	DurationMs  int64      `json:"duration_ms"`
	EventType   string     `json:"event_type"`
	SmartTypes  []string   `json:"smart_types,omitempty"`
	Score       int        `json:"score"`
	Status      ClipStatus `json:"status"`
	FilePath    string     `json:"file_path,omitempty"`
	FileSize    int64      `json:"file_size,omitempty"`
	Error       string     `json:"error,omitempty"`
	DownloadedAt *time.Time `json:"downloaded_at,omitempty"`
}
+
// DurationSecs returns the clip duration in seconds (fractional, converted
// from the stored millisecond value).
func (c *ClipEntry) DurationSecs() float64 {
	return float64(c.DurationMs) / 1000.0
}
+
// ClipIndex holds all clips for a day with metadata. It is the on-disk
// contract between the scan phase (writer) and the fetch/render phases
// (readers), stored as clip_index.json.
type ClipIndex struct {
	CameraID   string       `json:"camera_id"`
	CameraName string       `json:"camera_name"`
	Date       string       `json:"date"`
	CreatedAt  time.Time    `json:"created_at"`
	UpdatedAt  time.Time    `json:"updated_at"`
	Clips      []*ClipEntry `json:"clips"`
}
+
+// TotalDurationMs returns the sum of all clip durations in milliseconds.
+func (ci *ClipIndex) TotalDurationMs() int64 {
+	var total int64
+	for _, c := range ci.Clips {
+		total += c.DurationMs
+	}
+	return total
+}
+
// TotalDurationSecs returns the sum of all clip durations in seconds
// (fractional; see TotalDurationMs).
func (ci *ClipIndex) TotalDurationSecs() float64 {
	return float64(ci.TotalDurationMs()) / 1000.0
}
+
+// CompletedClips returns only clips with status Complete.
+func (ci *ClipIndex) CompletedClips() []*ClipEntry {
+	var completed []*ClipEntry
+	for _, c := range ci.Clips {
+		if c.Status == StatusComplete {
+			completed = append(completed, c)
+		}
+	}
+	return completed
+}
+
// Stats returns counts by status. Statuses with no clips are absent from
// the returned map (a lookup yields zero either way).
func (ci *ClipIndex) Stats() map[ClipStatus]int {
	stats := make(map[ClipStatus]int)
	for _, c := range ci.Clips {
		stats[c.Status]++
	}
	return stats
}
+
+// DayMetadata holds computed information about the day's timelapse.
+type DayMetadata struct {
+	CameraID      string    `json:"camera_id"`
+	CameraName    string    `json:"camera_name"`
+	Date          string    `json:"date"`
+	TotalClips    int       `json:"total_clips"`
+	ValidClips    int       `json:"valid_clips"`
+	TotalDuration float64   `json:"total_duration_secs"`
+	SpeedFactor   float64   `json:"speed_factor"`
+	TargetSecs    int       `json:"target_secs"`
+	OutputDuration float64  `json:"output_duration_secs"`
+	CreatedAt     time.Time `json:"created_at"`
+}
internal/output/layout.go
@@ -0,0 +1,116 @@
+// Package output manages the output directory structure.
+package output
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/crash/upvs/internal/sanitize"
+)
+
// Layout manages paths within the output directory. All path accessors are
// pure string computations; only EnsureDirs touches the filesystem.
type Layout struct {
	root   string // user-supplied output root
	camera string // camera name, sanitized for filesystem use
	date   string // date string (YYYY-MM-DD)
}
+
// NewLayout creates a Layout for the given output directory, camera, and
// date. The camera name is sanitized once here so every derived path uses
// a filesystem-safe form.
func NewLayout(outDir, camera, date string) *Layout {
	return &Layout{
		root:   outDir,
		camera: sanitize.Filename(camera),
		date:   date,
	}
}
+
+// EnsureDirs creates all required directories in the output structure.
+func (l *Layout) EnsureDirs() error {
+	dirs := []string{
+		l.MetadataDir(),
+		l.ClipsDir(),
+		l.ManifestsDir(),
+		l.TimelapseDir(),
+		l.LogsDir(),
+	}
+	for _, dir := range dirs {
+		err := os.MkdirAll(dir, 0755)
+		if err != nil {
+			return fmt.Errorf("creating directory %s: %w", dir, err)
+		}
+	}
+	return nil
+}
+
// Root returns the root output directory exactly as supplied to NewLayout.
func (l *Layout) Root() string {
	return l.root
}

// MetadataDir returns the path to the metadata directory (shared across
// cameras and dates, directly under the root).
func (l *Layout) MetadataDir() string {
	return filepath.Join(l.root, "metadata")
}

// CameraJSONPath returns the path to camera.json.
func (l *Layout) CameraJSONPath() string {
	return filepath.Join(l.MetadataDir(), "camera.json")
}

// DayJSONPath returns the path to day.json.
func (l *Layout) DayJSONPath() string {
	return filepath.Join(l.MetadataDir(), "day.json")
}
+
// ClipsDir returns the path to the clips directory for this camera/date
// (clips/<camera>/<date> under the root).
func (l *Layout) ClipsDir() string {
	return filepath.Join(l.root, "clips", l.camera, l.date)
}

// ClipPath returns the path for a specific clip file, named from the clip's
// millisecond time range and sanitized event ID.
func (l *Layout) ClipPath(startMs, endMs int64, eventID string) string {
	filename := fmt.Sprintf("clip_%d_%d_%s.mp4", startMs, endMs, sanitize.Filename(eventID))
	return filepath.Join(l.ClipsDir(), filename)
}

// ClipPartialPath returns the path for a partial (in-progress) clip
// download: the final clip path with a ".partial" suffix.
func (l *Layout) ClipPartialPath(startMs, endMs int64, eventID string) string {
	return l.ClipPath(startMs, endMs, eventID) + ".partial"
}
+
// ManifestsDir returns the path to the manifests directory.
func (l *Layout) ManifestsDir() string {
	return filepath.Join(l.root, "manifests")
}

// ClipIndexPath returns the path to clip_index.json, the scan/fetch state
// file.
func (l *Layout) ClipIndexPath() string {
	return filepath.Join(l.ManifestsDir(), "clip_index.json")
}

// ConcatTxtPath returns the path to concat.txt for FFmpeg's concat demuxer.
func (l *Layout) ConcatTxtPath() string {
	return filepath.Join(l.ManifestsDir(), "concat.txt")
}
+
// TimelapseDir returns the path to the timelapse output directory.
func (l *Layout) TimelapseDir() string {
	return filepath.Join(l.root, "timelapse")
}

// TimelapsePath returns the path to the final timelapse video, named
// <camera>_<date>_timelapse.mp4.
func (l *Layout) TimelapsePath() string {
	filename := fmt.Sprintf("%s_%s_timelapse.mp4", l.camera, l.date)
	return filepath.Join(l.TimelapseDir(), filename)
}

// LogsDir returns the path to the logs directory.
func (l *Layout) LogsDir() string {
	return filepath.Join(l.root, "logs")
}

// RunLogPath returns the path to run.log.
func (l *Layout) RunLogPath() string {
	return filepath.Join(l.LogsDir(), "run.log")
}
internal/pipeline/fetch.go
@@ -0,0 +1,166 @@
+package pipeline
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+	"os"
+	"time"
+
+	"github.com/crash/upvs/internal/manifest"
+	"github.com/crash/upvs/internal/output"
+	"github.com/crash/upvs/internal/protect"
+	"github.com/crash/upvs/internal/retry"
+	"github.com/crash/upvs/internal/workerpool"
+)
+
// FetchConfig holds configuration for the fetch phase.
type FetchConfig struct {
	Workers int // Number of concurrent download workers.
	Retries int // Attempts per clip, passed through to the retry helper.
}
+
// FetchResult holds the output of the fetch phase.
type FetchResult struct {
	Downloaded int     // Clips downloaded successfully this run.
	Skipped    int     // Clips already complete on disk and skipped.
	Failed     int     // Clips whose download failed after all retries.
	Errors     []error // One error per failed clip.
}
+
+// Fetch downloads clips concurrently with retry support.
+func Fetch(ctx context.Context, client *protect.Client, layout *output.Layout, cfg FetchConfig) (*FetchResult, error) {
+	clipIndex, err := manifest.ReadClipIndex(layout.ClipIndexPath())
+	if err != nil {
+		return nil, fmt.Errorf("reading clip index: %w", err)
+	}
+
+	slog.Info("starting fetch",
+		slog.Int("clips", len(clipIndex.Clips)),
+		slog.Int("workers", cfg.Workers))
+
+	result := &FetchResult{}
+	tasks := make([]workerpool.Task, 0, len(clipIndex.Clips))
+	cameraID := clipIndex.CameraID
+
+	for _, clip := range clipIndex.Clips {
+		// Skip if already complete
+		if clip.Status == manifest.StatusComplete {
+			if fileExists(clip.FilePath) {
+				result.Skipped++
+				continue
+			}
+		}
+
+		task := func(ctx context.Context) error {
+			return downloadClip(ctx, client, cameraID, clip, cfg.Retries)
+		}
+		tasks = append(tasks, task)
+	}
+
+	if len(tasks) == 0 {
+		slog.Info("no clips to download")
+		return result, nil
+	}
+
+	pool := workerpool.New(cfg.Workers)
+	results := pool.RunWithProgress(ctx, tasks, func(completed, total int) {
+		slog.Info("fetch progress",
+			slog.Int("completed", completed),
+			slog.Int("total", total))
+	})
+
+	// Count results
+	for _, r := range results {
+		if r.Error != nil {
+			result.Failed++
+			result.Errors = append(result.Errors, r.Error)
+		} else {
+			result.Downloaded++
+		}
+	}
+
+	// Save updated clip index
+	err = clipIndex.Write(layout.ClipIndexPath())
+	if err != nil {
+		return result, fmt.Errorf("writing clip index: %w", err)
+	}
+
+	slog.Info("fetch complete",
+		slog.Int("downloaded", result.Downloaded),
+		slog.Int("skipped", result.Skipped),
+		slog.Int("failed", result.Failed))
+
+	return result, nil
+}
+
+func downloadClip(ctx context.Context, client *protect.Client, cameraID string, clip *manifest.ClipEntry, maxRetries int) error {
+	partialPath := clip.FilePath + ".partial"
+
+	// Clean up any existing partial file
+	os.Remove(partialPath)
+
+	clip.Status = manifest.StatusInProgress
+
+	retryCfg := retry.DefaultConfig()
+	retryCfg.MaxAttempts = maxRetries
+
+	err := retry.Do(ctx, retryCfg, func() error {
+		return doDownload(ctx, client, cameraID, clip, partialPath)
+	})
+
+	if err != nil {
+		clip.Status = manifest.StatusFailed
+		clip.Error = err.Error()
+		return fmt.Errorf("downloading clip (event_id=%s): %w", clip.EventID, err)
+	}
+
+	// Atomic rename
+	err = os.Rename(partialPath, clip.FilePath)
+	if err != nil {
+		clip.Status = manifest.StatusFailed
+		clip.Error = err.Error()
+		return fmt.Errorf("renaming clip (event_id=%s): %w", clip.EventID, err)
+	}
+
+	// Get file size
+	info, err := os.Stat(clip.FilePath)
+	if err == nil {
+		clip.FileSize = info.Size()
+	}
+
+	now := time.Now()
+	clip.DownloadedAt = &now
+	clip.Status = manifest.StatusComplete
+	clip.Error = ""
+
+	return nil
+}
+
+func doDownload(ctx context.Context, client *protect.Client, cameraID string, clip *manifest.ClipEntry, partialPath string) error {
+	f, err := os.Create(partialPath)
+	if err != nil {
+		return fmt.Errorf("creating file: %w", err)
+	}
+	defer f.Close()
+
+	err = client.DownloadClip(ctx, cameraID, clip.StartMs, clip.EndMs, f)
+	if err != nil {
+		return err
+	}
+
+	err = f.Sync()
+	if err != nil {
+		return fmt.Errorf("syncing file: %w", err)
+	}
+
+	return nil
+}
+
// fileExists reports whether path names an existing, non-empty file.
// Zero-byte files count as missing, so truncated downloads get redone.
func fileExists(path string) bool {
	if info, err := os.Stat(path); err == nil {
		return info.Size() > 0
	}
	return false
}
internal/pipeline/manifest.go
@@ -0,0 +1,85 @@
+package pipeline
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+
+	"github.com/crash/upvs/internal/ffmpeg"
+	"github.com/crash/upvs/internal/manifest"
+	"github.com/crash/upvs/internal/output"
+)
+
// ManifestResult holds the output of the manifest phase.
type ManifestResult struct {
	ValidClips    int     // Clips that passed the existence and ffprobe checks.
	TotalDuration float64 // Sum of valid clip durations, in seconds.
}
+
+// BuildManifest filters valid clips and generates the FFmpeg concat file.
+func BuildManifest(ctx context.Context, layout *output.Layout) (*ManifestResult, error) {
+	clipIndex, err := manifest.ReadClipIndex(layout.ClipIndexPath())
+	if err != nil {
+		return nil, fmt.Errorf("reading clip index: %w", err)
+	}
+
+	slog.Info("building manifest", slog.Int("total_clips", len(clipIndex.Clips)))
+
+	var validPaths []string
+	var totalDurationMs int64
+
+	for _, clip := range clipIndex.Clips {
+		if clip.Status != manifest.StatusComplete {
+			continue
+		}
+
+		// Verify file exists and is valid
+		if !fileExists(clip.FilePath) {
+			slog.Warn("clip file missing",
+				slog.String("event_id", clip.EventID),
+				slog.String("path", clip.FilePath))
+			continue
+		}
+
+		// Optionally verify with ffprobe
+		err := ffmpeg.Probe(ctx, clip.FilePath)
+		if err != nil {
+			slog.Warn("clip invalid",
+				slog.String("event_id", clip.EventID),
+				slog.String("error", err.Error()))
+			clip.Status = manifest.StatusFailed
+			clip.Error = "invalid video file"
+			continue
+		}
+
+		validPaths = append(validPaths, clip.FilePath)
+		totalDurationMs += clip.DurationMs
+	}
+
+	if len(validPaths) == 0 {
+		return nil, fmt.Errorf("no valid clips to render")
+	}
+
+	err = ffmpeg.GenerateConcatFile(layout.ConcatTxtPath(), validPaths)
+	if err != nil {
+		return nil, err
+	}
+
+	// Update clip index with any status changes
+	err = clipIndex.Write(layout.ClipIndexPath())
+	if err != nil {
+		return nil, err
+	}
+
+	totalDuration := float64(totalDurationMs) / 1000.0
+
+	slog.Info("manifest built",
+		slog.Int("valid_clips", len(validPaths)),
+		slog.Float64("total_duration_secs", totalDuration),
+		slog.String("concat_path", layout.ConcatTxtPath()))
+
+	return &ManifestResult{
+		ValidClips:    len(validPaths),
+		TotalDuration: totalDuration,
+	}, nil
+}
internal/pipeline/render.go
@@ -0,0 +1,114 @@
+package pipeline
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+	"time"
+
+	"github.com/crash/upvs/internal/ffmpeg"
+	"github.com/crash/upvs/internal/manifest"
+	"github.com/crash/upvs/internal/output"
+)
+
// RenderConfig holds configuration for the render phase.
type RenderConfig struct {
	TargetSecs   int     // Desired output length in seconds.
	FPS          int     // Output frame rate passed to FFmpeg.
	CRF          int     // Constant rate factor passed to FFmpeg (lower = higher quality).
	Preset       string  // FFmpeg encoder preset (e.g. "medium").
	MinSpeedMult float64 // Lower bound on the computed speed-up factor.
	MaxSpeedMult float64 // Upper bound on the computed speed-up factor.
}
+
+// DefaultRenderConfig returns default render configuration.
+func DefaultRenderConfig() RenderConfig {
+	return RenderConfig{
+		TargetSecs:   600,
+		FPS:          30,
+		CRF:          23,
+		Preset:       "medium",
+		MinSpeedMult: 1.0,
+		MaxSpeedMult: 2000.0,
+	}
+}
+
// RenderResult holds the output of the render phase.
type RenderResult struct {
	OutputPath     string                // Path of the rendered timelapse video.
	SpeedFactor    float64               // Speed multiplier actually used.
	OutputDuration float64               // Expected output length in seconds.
	Metadata       *manifest.DayMetadata // Summary also persisted to day.json.
}
+
// Render executes FFmpeg to create the timelapse.
//
// It rebuilds the manifest (filtering to valid clips), derives the speed
// factor needed to approach cfg.TargetSecs, renders via FFmpeg, and
// finally writes day.json summarizing the run.
func Render(ctx context.Context, layout *output.Layout, cfg RenderConfig) (*RenderResult, error) {
	// Build manifest first: validates clips and writes concat.txt.
	manifestResult, err := BuildManifest(ctx, layout)
	if err != nil {
		return nil, err
	}

	// Calculate speed from the total valid-clip duration.
	speedCfg := SpeedConfig{
		TargetSecs:   cfg.TargetSecs,
		MinSpeedMult: cfg.MinSpeedMult,
		MaxSpeedMult: cfg.MaxSpeedMult,
	}
	speedFactor, outputDuration := CalculateSpeed(manifestResult.TotalDuration, speedCfg)

	slog.Info("starting render",
		slog.Float64("speed_factor", speedFactor),
		slog.Float64("expected_duration", outputDuration),
		slog.String("output", layout.TimelapsePath()))

	// Run FFmpeg against the concat list produced by BuildManifest.
	ffmpegCfg := ffmpeg.RenderConfig{
		ConcatFile:  layout.ConcatTxtPath(),
		OutputPath:  layout.TimelapsePath(),
		SpeedFactor: speedFactor,
		FPS:         cfg.FPS,
		CRF:         cfg.CRF,
		Preset:      cfg.Preset,
	}

	err = ffmpeg.Render(ctx, ffmpegCfg)
	if err != nil {
		return nil, fmt.Errorf("rendering timelapse: %w", err)
	}

	// Re-read the clip index for camera info; BuildManifest may have
	// updated it on disk since the render began.
	clipIndex, err := manifest.ReadClipIndex(layout.ClipIndexPath())
	if err != nil {
		return nil, fmt.Errorf("reading clip index: %w", err)
	}

	// Save day metadata summarizing this run.
	dayMeta := &manifest.DayMetadata{
		CameraID:       clipIndex.CameraID,
		CameraName:     clipIndex.CameraName,
		Date:           clipIndex.Date,
		TotalClips:     len(clipIndex.Clips),
		ValidClips:     manifestResult.ValidClips,
		TotalDuration:  manifestResult.TotalDuration,
		SpeedFactor:    speedFactor,
		TargetSecs:     cfg.TargetSecs,
		OutputDuration: outputDuration,
		CreatedAt:      time.Now(),
	}

	err = manifest.WriteDayMetadata(layout.DayJSONPath(), dayMeta)
	if err != nil {
		return nil, err
	}

	slog.Info("render complete",
		slog.String("output", layout.TimelapsePath()))

	return &RenderResult{
		OutputPath:     layout.TimelapsePath(),
		SpeedFactor:    speedFactor,
		OutputDuration: outputDuration,
		Metadata:       dayMeta,
	}, nil
}
internal/pipeline/scan.go
@@ -0,0 +1,103 @@
+// Package pipeline implements the timelapse generation phases.
+package pipeline
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log/slog"
+	"os"
+	"time"
+
+	"github.com/crash/upvs/internal/manifest"
+	"github.com/crash/upvs/internal/output"
+	"github.com/crash/upvs/internal/protect"
+)
+
// ScanResult holds the output of the scan phase.
type ScanResult struct {
	Camera     *protect.Camera     // The resolved camera.
	ClipIndex  *manifest.ClipIndex // Index written to clip_index.json.
	EventCount int                 // Number of events found for the day.
}
+
+// Scan enumerates events for a day and creates the clip index.
+func Scan(ctx context.Context, client *protect.Client, layout *output.Layout, cameraRef string, date time.Time) (*ScanResult, error) {
+	slog.Info("resolving camera", slog.String("camera", cameraRef))
+
+	camera, err := client.ResolveCamera(ctx, cameraRef)
+	if err != nil {
+		return nil, fmt.Errorf("resolving camera: %w", err)
+	}
+
+	slog.Info("camera resolved",
+		slog.String("id", camera.ID),
+		slog.String("name", camera.Name))
+
+	// Save camera info
+	err = writeCameraJSON(layout.CameraJSONPath(), camera)
+	if err != nil {
+		return nil, err
+	}
+
+	slog.Info("fetching events",
+		slog.String("camera_id", camera.ID),
+		slog.String("date", date.Format("2006-01-02")))
+
+	events, err := client.ListDayEvents(ctx, camera.ID, date)
+	if err != nil {
+		return nil, fmt.Errorf("listing events: %w", err)
+	}
+
+	slog.Info("events found", slog.Int("count", len(events)))
+
+	// Build clip index
+	dateStr := date.Format("2006-01-02")
+	clipIndex := manifest.NewClipIndex(camera.ID, camera.Name, dateStr)
+
+	for _, event := range events {
+		entry := &manifest.ClipEntry{
+			EventID:    event.ID,
+			StartMs:    event.Start,
+			EndMs:      event.End,
+			DurationMs: event.End - event.Start,
+			EventType:  event.Type,
+			SmartTypes: event.SmartTypes,
+			Score:      event.Score,
+			Status:     manifest.StatusPending,
+			FilePath:   layout.ClipPath(event.Start, event.End, event.ID),
+		}
+		clipIndex.AddClip(entry)
+	}
+
+	clipIndex.Sort()
+
+	err = clipIndex.Write(layout.ClipIndexPath())
+	if err != nil {
+		return nil, err
+	}
+
+	slog.Info("clip index written",
+		slog.String("path", layout.ClipIndexPath()),
+		slog.Int("clips", len(clipIndex.Clips)))
+
+	return &ScanResult{
+		Camera:     camera,
+		ClipIndex:  clipIndex,
+		EventCount: len(events),
+	}, nil
+}
+
+func writeCameraJSON(path string, camera *protect.Camera) error {
+	data, err := json.MarshalIndent(camera, "", "  ")
+	if err != nil {
+		return fmt.Errorf("marshaling camera: %w", err)
+	}
+
+	err = os.WriteFile(path, data, 0644)
+	if err != nil {
+		return fmt.Errorf("writing camera.json: %w", err)
+	}
+
+	return nil
+}
internal/pipeline/speed.go
@@ -0,0 +1,53 @@
+package pipeline
+
+import (
+	"log/slog"
+)
+
// SpeedConfig holds parameters for speed calculation.
type SpeedConfig struct {
	TargetSecs   int     // Target output duration (default 600 = 10 min)
	MinSpeedMult float64 // Minimum speed multiplier (default 1.0)
	MaxSpeedMult float64 // Maximum speed multiplier (default 2000.0)
}

// DefaultSpeedConfig returns default speed configuration.
func DefaultSpeedConfig() SpeedConfig {
	return SpeedConfig{
		TargetSecs:   600,
		MinSpeedMult: 1.0,
		MaxSpeedMult: 2000.0,
	}
}

// CalculateSpeed computes the speed factor to achieve target duration.
// Returns the clamped speed factor and the expected output duration.
//
// A non-positive total duration yields (MinSpeedMult, 0). A non-positive
// TargetSecs is treated as a misconfiguration and falls back to the
// minimum speed (at least 1x); previously it divided by zero, producing
// +Inf and silently clamping to MaxSpeedMult.
func CalculateSpeed(totalDurationSecs float64, cfg SpeedConfig) (speedFactor float64, outputDuration float64) {
	if totalDurationSecs <= 0 {
		return cfg.MinSpeedMult, 0
	}

	targetDuration := float64(cfg.TargetSecs)
	if targetDuration <= 0 {
		// Guard against division by zero for misconfigured targets:
		// play back at the slowest allowed speed (never below 1x).
		speedFactor = max(cfg.MinSpeedMult, 1)
	} else {
		// speed = total / target
		speedFactor = totalDurationSecs / targetDuration
	}

	// Clamp to the allowed range.
	if speedFactor < cfg.MinSpeedMult {
		speedFactor = cfg.MinSpeedMult
	}
	if speedFactor > cfg.MaxSpeedMult {
		speedFactor = cfg.MaxSpeedMult
	}

	// Actual output duration with the clamped speed.
	outputDuration = totalDurationSecs / speedFactor

	slog.Info("speed calculated",
		slog.Float64("total_duration_secs", totalDurationSecs),
		slog.Int("target_secs", cfg.TargetSecs),
		slog.Float64("speed_factor", speedFactor),
		slog.Float64("output_duration_secs", outputDuration))

	return speedFactor, outputDuration
}
internal/pipeline/speed_test.go
@@ -0,0 +1,81 @@
+package pipeline
+
+import (
+	"math"
+	"testing"
+)
+
+func TestCalculateSpeed(t *testing.T) {
+	tests := []struct {
+		name           string
+		totalDuration  float64
+		cfg            SpeedConfig
+		expectedSpeed  float64
+		expectedOutput float64
+	}{
+		{
+			name:          "exact target",
+			totalDuration: 6000, // 100 minutes
+			cfg: SpeedConfig{
+				TargetSecs:   600,
+				MinSpeedMult: 1.0,
+				MaxSpeedMult: 2000.0,
+			},
+			expectedSpeed:  10.0,
+			expectedOutput: 600.0,
+		},
+		{
+			name:          "less than target",
+			totalDuration: 300, // 5 minutes
+			cfg: SpeedConfig{
+				TargetSecs:   600,
+				MinSpeedMult: 1.0,
+				MaxSpeedMult: 2000.0,
+			},
+			expectedSpeed:  1.0, // Clamped to min
+			expectedOutput: 300.0,
+		},
+		{
+			name:          "very long duration",
+			totalDuration: 86400, // 24 hours
+			cfg: SpeedConfig{
+				TargetSecs:   600,
+				MinSpeedMult: 1.0,
+				MaxSpeedMult: 2000.0,
+			},
+			expectedSpeed:  144.0, // 86400 / 600
+			expectedOutput: 600.0,
+		},
+		{
+			name:          "exceeds max speed",
+			totalDuration: 2000000, // Very long
+			cfg: SpeedConfig{
+				TargetSecs:   600,
+				MinSpeedMult: 1.0,
+				MaxSpeedMult: 100.0,
+			},
+			expectedSpeed:  100.0, // Clamped to max
+			expectedOutput: 20000.0,
+		},
+		{
+			name:          "zero duration",
+			totalDuration: 0,
+			cfg:           DefaultSpeedConfig(),
+			expectedSpeed: 1.0,
+			expectedOutput: 0,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			speed, output := CalculateSpeed(tc.totalDuration, tc.cfg)
+
+			if math.Abs(speed-tc.expectedSpeed) > 0.001 {
+				t.Errorf("speed = %v, want %v", speed, tc.expectedSpeed)
+			}
+			if math.Abs(output-tc.expectedOutput) > 0.001 {
+				t.Errorf("output duration = %v, want %v", output, tc.expectedOutput)
+			}
+		})
+	}
+}
internal/protect/bootstrap.go
@@ -0,0 +1,63 @@
+package protect
+
+import (
+	"context"
+	"fmt"
+	"strings"
+)
+
+// Bootstrap fetches system information including all cameras.
+func (c *Client) Bootstrap(ctx context.Context) (*BootstrapResponse, error) {
+	req, err := c.newRequest(ctx, "GET", "/bootstrap", nil)
+	if err != nil {
+		return nil, fmt.Errorf("creating request: %w", err)
+	}
+
+	var resp BootstrapResponse
+	err = c.doJSON(req, &resp)
+	if err != nil {
+		return nil, fmt.Errorf("fetching bootstrap: %w", err)
+	}
+	return &resp, nil
+}
+
+// ResolveCamera finds a camera by ID or name.
+// If the input matches a camera ID exactly, that camera is returned.
+// Otherwise, it searches by name (case-insensitive partial match).
+// Returns ErrCameraNotFound if no match, ErrMultipleCameras if ambiguous.
+func (c *Client) ResolveCamera(ctx context.Context, idOrName string) (*Camera, error) {
+	bootstrap, err := c.Bootstrap(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// First try exact ID match
+	for i := range bootstrap.Cameras {
+		if bootstrap.Cameras[i].ID == idOrName {
+			return &bootstrap.Cameras[i], nil
+		}
+	}
+
+	// Then try name match (case-insensitive)
+	var matches []*Camera
+	searchLower := strings.ToLower(idOrName)
+	for i := range bootstrap.Cameras {
+		nameLower := strings.ToLower(bootstrap.Cameras[i].Name)
+		if nameLower == searchLower || strings.Contains(nameLower, searchLower) {
+			matches = append(matches, &bootstrap.Cameras[i])
+		}
+	}
+
+	switch len(matches) {
+	case 0:
+		return nil, fmt.Errorf("%w: %s", ErrCameraNotFound, idOrName)
+	case 1:
+		return matches[0], nil
+	default:
+		names := make([]string, len(matches))
+		for i, cam := range matches {
+			names[i] = fmt.Sprintf("%s (%s)", cam.Name, cam.ID)
+		}
+		return nil, fmt.Errorf("%w: %s matches: %s", ErrMultipleCameras, idOrName, strings.Join(names, ", "))
+	}
+}
internal/protect/client.go
@@ -0,0 +1,208 @@
+package protect
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"log/slog"
+	"net/http"
+	"net/http/cookiejar"
+	"time"
+)
+
// Sentinel errors for API operations; compare with errors.Is.
var (
	ErrUnauthorized    = errors.New("unauthorized: invalid credentials") // 401/403 responses
	ErrNotFound        = errors.New("resource not found")                // 404 responses
	ErrRateLimited     = errors.New("rate limited")                      // 429 responses
	ErrServerError     = errors.New("server error")                      // 5xx responses
	ErrCameraNotFound  = errors.New("camera not found")                  // camera resolution: no match
	ErrMultipleCameras = errors.New("multiple cameras match name")       // camera resolution: ambiguous
	ErrNotLoggedIn     = errors.New("not logged in: call Login() first")
)
+
// Client is a UniFi Protect API client.
// The session cookie obtained by Login lives in httpClient's cookie jar.
type Client struct {
	baseURL    string // base URL the auth and API paths are appended to
	apiPath    string // "/proxy/protect/api" or "/api"
	csrfToken  string // CSRF token captured from the Login response, if any
	loggedIn   bool   // set true after a successful Login
	httpClient *http.Client
}
+
// ClientOption configures a Client; options are applied in order by NewClient.
type ClientOption func(*Client)
+
+// WithTLSInsecure disables TLS certificate verification.
+func WithTLSInsecure() ClientOption {
+	return func(c *Client) {
+		if c.httpClient.Transport == nil {
+			c.httpClient.Transport = &http.Transport{}
+		}
+		if t, ok := c.httpClient.Transport.(*http.Transport); ok {
+			t.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+		}
+	}
+}
+
+// WithTimeout sets the HTTP client timeout.
+func WithTimeout(d time.Duration) ClientOption {
+	return func(c *Client) {
+		c.httpClient.Timeout = d
+	}
+}
+
+// WithDirectAPI uses /api path instead of /proxy/protect/api.
+// Use this when connecting directly to a Protect NVR/Cloud Key
+// rather than through UniFi OS gateway.
+func WithDirectAPI() ClientOption {
+	return func(c *Client) {
+		c.apiPath = "/api"
+	}
+}
+
+// NewClient creates a new UniFi Protect API client.
+func NewClient(baseURL string, opts ...ClientOption) *Client {
+	jar, _ := cookiejar.New(nil)
+	c := &Client{
+		baseURL: baseURL,
+		apiPath: "/proxy/protect/api", // Default for UniFi OS
+		httpClient: &http.Client{
+			Timeout: 30 * time.Second,
+			Jar:     jar,
+		},
+	}
+	for _, opt := range opts {
+		opt(c)
+	}
+	return c
+}
+
+// APIPath returns the current API path prefix.
+func (c *Client) APIPath() string {
+	return c.apiPath
+}
+
// Login authenticates with UniFi Protect using username/password.
// This obtains a session cookie that will be used for subsequent requests.
//
// The session cookie lands in the http.Client's cookie jar; on success the
// client also stores the CSRF token (when the server returned one) and
// marks itself logged in. Login always posts to the UniFi OS endpoint
// /api/auth/login, independent of the Protect API path prefix.
func (c *Client) Login(ctx context.Context, username, password string) error {
	// Wire format expected by /api/auth/login; rememberMe/token are sent
	// explicitly as false/empty.
	creds := map[string]any{
		"username":   username,
		"password":   password,
		"rememberMe": false,
		"token":      "",
	}
	body, err := json.Marshal(creds)
	if err != nil {
		return fmt.Errorf("encoding credentials: %w", err)
	}

	url := c.baseURL + "/api/auth/login"
	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body))
	if err != nil {
		return fmt.Errorf("creating login request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	slog.Debug("http request",
		slog.String("method", req.Method),
		slog.String("url", req.URL.String()))

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("login request: %w", err)
	}
	defer resp.Body.Close()

	slog.Debug("http response",
		slog.Int("status", resp.StatusCode))

	if resp.StatusCode != http.StatusOK {
		// Read at most 1 KiB of the body for diagnostics; best-effort.
		respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
		slog.Debug("login response body", slog.String("body", string(respBody)))
		if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden {
			return fmt.Errorf("%w: %s", ErrUnauthorized, string(respBody))
		}
		return fmt.Errorf("login failed with status %d: %s", resp.StatusCode, string(respBody))
	}

	// Extract CSRF token from response header. Some responses may omit
	// it; requests then simply go out without the header.
	if token := resp.Header.Get("X-Csrf-Token"); token != "" {
		c.csrfToken = token
		slog.Debug("obtained CSRF token")
	}

	c.loggedIn = true
	slog.Debug("login successful")
	return nil
}
+
+// newRequest creates an HTTP request with session cookies and CSRF token.
+// The path should be relative (e.g., "/bootstrap", not "/api/bootstrap").
+func (c *Client) newRequest(ctx context.Context, method, path string, body io.Reader) (*http.Request, error) {
+	url := c.baseURL + c.apiPath + path
+	req, err := http.NewRequestWithContext(ctx, method, url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Accept", "application/json")
+	if c.csrfToken != "" {
+		req.Header.Set("X-Csrf-Token", c.csrfToken)
+	}
+	return req, nil
+}
+
+// do executes a request and handles common error responses.
+func (c *Client) do(req *http.Request) (*http.Response, error) {
+	slog.Debug("http request",
+		slog.String("method", req.Method),
+		slog.String("url", req.URL.String()))
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	slog.Debug("http response",
+		slog.Int("status", resp.StatusCode))
+
+	switch resp.StatusCode {
+	case http.StatusOK, http.StatusPartialContent:
+		return resp, nil
+	case http.StatusUnauthorized, http.StatusForbidden:
+		resp.Body.Close()
+		return nil, ErrUnauthorized
+	case http.StatusNotFound:
+		resp.Body.Close()
+		return nil, ErrNotFound
+	case http.StatusTooManyRequests:
+		resp.Body.Close()
+		return nil, ErrRateLimited
+	default:
+		body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
+		resp.Body.Close()
+		if resp.StatusCode >= 500 {
+			return nil, fmt.Errorf("%w: %d: %s", ErrServerError, resp.StatusCode, string(body))
+		}
+		return nil, fmt.Errorf("unexpected status %d: %s", resp.StatusCode, string(body))
+	}
+}
+
+// doJSON executes a request and decodes the JSON response.
+func (c *Client) doJSON(req *http.Request, v any) error {
+	resp, err := c.do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	err = json.NewDecoder(resp.Body).Decode(v)
+	if err != nil {
+		return fmt.Errorf("decoding response: %w", err)
+	}
+	return nil
+}
internal/protect/download.go
@@ -0,0 +1,38 @@
+package protect
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/url"
+	"strconv"
+)
+
// DownloadClip downloads a video clip for the specified time range.
// The video is streamed directly to the provided writer.
//
// startMs/endMs are Unix-millisecond bounds passed to the video export
// endpoint; the body is copied to w without buffering the whole clip.
func (c *Client) DownloadClip(ctx context.Context, cameraID string, startMs, endMs int64, w io.Writer) error {
	params := url.Values{}
	params.Set("camera", cameraID)
	params.Set("start", strconv.FormatInt(startMs, 10))
	params.Set("end", strconv.FormatInt(endMs, 10))

	path := "/video/export?" + params.Encode()
	req, err := c.newRequest(ctx, "GET", path, nil)
	if err != nil {
		return fmt.Errorf("creating request: %w", err)
	}

	// NOTE(review): no extended timeout is actually applied here despite
	// the original "use longer timeout" comment — c.do runs under the
	// client's default timeout, which may abort large exports. Consider a
	// per-request deadline; confirm against real export sizes.
	resp, err := c.do(req)
	if err != nil {
		return fmt.Errorf("requesting video export: %w", err)
	}
	defer resp.Body.Close()

	_, err = io.Copy(w, resp.Body)
	if err != nil {
		return fmt.Errorf("streaming video: %w", err)
	}

	return nil
}
internal/protect/events.go
@@ -0,0 +1,100 @@
+package protect
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"strconv"
+	"time"
+)
+
// EventsParams configures the events query.
type EventsParams struct {
	CameraID  string    // Camera whose events to list.
	StartTime time.Time // Start of the query window.
	EndTime   time.Time // End of the query window.
	Types     []string  // e.g., "motion", "smartDetect"; empty = no type filter.
	Limit     int       // Max events per request; <= 0 defaults to 100.
}
+
// ListEvents fetches events for the given parameters.
// This handles pagination internally and returns all matching events.
//
// Pages are requested in ascending order; a short page (fewer than limit
// results) ends the loop. On a page fetch error, the events gathered so
// far are returned alongside the error.
func (c *Client) ListEvents(ctx context.Context, params EventsParams) ([]Event, error) {
	var allEvents []Event
	limit := params.Limit
	if limit <= 0 {
		limit = 100 // default page size
	}

	startMs := params.StartTime.UnixMilli()
	endMs := params.EndTime.UnixMilli()

	for {
		events, err := c.fetchEventsPage(ctx, params.CameraID, startMs, endMs, params.Types, limit)
		if err != nil {
			return allEvents, err
		}

		if len(events) == 0 {
			break
		}

		allEvents = append(allEvents, events...)

		// If we got fewer than limit, we're done
		if len(events) < limit {
			break
		}

		// Use end time of last event (+1ms) as start for next page.
		// NOTE(review): if events can overlap, advancing the cursor past
		// the last event's END may skip events whose start falls inside
		// that span; advancing by the last event's start would be safer —
		// confirm against the events endpoint's filtering semantics.
		lastEvent := events[len(events)-1]
		startMs = lastEvent.End + 1
	}

	return allEvents, nil
}
+
+// fetchEventsPage fetches a single page of events.
+func (c *Client) fetchEventsPage(ctx context.Context, cameraID string, startMs, endMs int64, types []string, limit int) ([]Event, error) {
+	params := url.Values{}
+	params.Set("cameras", cameraID)
+	params.Set("start", strconv.FormatInt(startMs, 10))
+	params.Set("end", strconv.FormatInt(endMs, 10))
+	params.Set("limit", strconv.Itoa(limit))
+	params.Set("orderDirection", "ASC")
+
+	if len(types) > 0 {
+		for _, t := range types {
+			params.Add("types", t)
+		}
+	}
+
+	path := "/events?" + params.Encode()
+	req, err := c.newRequest(ctx, "GET", path, nil)
+	if err != nil {
+		return nil, fmt.Errorf("creating request: %w", err)
+	}
+
+	var events EventsResponse
+	err = c.doJSON(req, &events)
+	if err != nil {
+		return nil, fmt.Errorf("fetching events: %w", err)
+	}
+
+	return events, nil
+}
+
+// ListDayEvents is a convenience method to list all events for a single day.
+func (c *Client) ListDayEvents(ctx context.Context, cameraID string, date time.Time) ([]Event, error) {
+	// Start of day in local time
+	startOfDay := time.Date(date.Year(), date.Month(), date.Day(), 0, 0, 0, 0, date.Location())
+	endOfDay := startOfDay.Add(24 * time.Hour)
+
+	return c.ListEvents(ctx, EventsParams{
+		CameraID:  cameraID,
+		StartTime: startOfDay,
+		EndTime:   endOfDay,
+		Types:     []string{"motion", "smartDetect"},
+		Limit:     100,
+	})
+}
internal/protect/types.go
@@ -0,0 +1,90 @@
+// Package protect provides a client for the UniFi Protect API.
+package protect
+
+import "time"
+
// Camera represents a UniFi Protect camera. (Fix: struct fields were not
// gofmt-aligned.)
type Camera struct {
	ID          string `json:"id"`
	Name        string `json:"name"`
	Type        string `json:"type"`
	State       string `json:"state"`
	IsConnected bool   `json:"isConnected"`
	Host        string `json:"host"`
	Mac         string `json:"mac"`
	ModelKey    string `json:"modelKey"`
	FirmwareVer string `json:"firmwareBuild"`
	VideoWidth  int    `json:"videoWidth,omitempty"`
	VideoHeight int    `json:"videoHeight,omitempty"`
	// Channels lists the camera's stream channels; used as a resolution
	// fallback when videoWidth/videoHeight are absent.
	Channels []struct {
		ID      int  `json:"id"`
		Width   int  `json:"width"`
		Height  int  `json:"height"`
		Enabled bool `json:"enabled"`
		FPS     int  `json:"fps"`
		Bitrate int  `json:"bitrate"`
		IsRTSP  bool `json:"isRtspEnabled"`
	} `json:"channels,omitempty"`
}

// Resolution returns the best available resolution for the camera: the
// top-level videoWidth/videoHeight when present, otherwise the first
// enabled channel with a non-zero width, otherwise a 1080p assumption.
func (c *Camera) Resolution() (width, height int) {
	if c.VideoWidth > 0 && c.VideoHeight > 0 {
		return c.VideoWidth, c.VideoHeight
	}
	// Fall back to first enabled channel
	for _, ch := range c.Channels {
		if ch.Enabled && ch.Width > 0 {
			return ch.Width, ch.Height
		}
	}
	return 1920, 1080 // Default assumption
}
+
// NVR represents the UniFi Protect NVR.
type NVR struct {
	ID            string `json:"id"`
	Name          string `json:"name"`
	Host          string `json:"host"`
	Mac           string `json:"mac"`
	Version       string `json:"version"`
	FirmwareVer   string `json:"firmwareBuild"`
	IsConnected   bool   `json:"isConnectedToCloud"` // cloud connectivity, per the JSON key
	EnableAutoBkp bool   `json:"enableAutomaticBackups"`
}
+
// BootstrapResponse is the response from /api/bootstrap.
// Only the nvr and cameras portions of the payload are decoded here;
// other fields are ignored.
type BootstrapResponse struct {
	NVR     NVR      `json:"nvr"`
	Cameras []Camera `json:"cameras"`
}
+
// Event represents a motion/smart detection event. (Fix: the SmartTypes
// field was not gofmt-aligned with the rest of the struct.)
type Event struct {
	ID         string   `json:"id"`
	Type       string   `json:"type"`
	Start      int64    `json:"start"`     // Unix milliseconds
	End        int64    `json:"end"`       // Unix milliseconds
	Score      int      `json:"score"`     // 0-100 confidence
	Camera     string   `json:"camera"`    // Camera ID
	Partition  string   `json:"partition"` // Storage partition
	SmartTypes []string `json:"smartDetectTypes,omitempty"`
}

// StartTime returns the event start as a time.Time.
func (e *Event) StartTime() time.Time {
	return time.UnixMilli(e.Start)
}

// EndTime returns the event end as a time.Time.
func (e *Event) EndTime() time.Time {
	return time.UnixMilli(e.End)
}

// Duration returns the event duration (End minus Start).
func (e *Event) Duration() time.Duration {
	return time.Duration(e.End-e.Start) * time.Millisecond
}
+
// EventsResponse is the response from /api/events when pagination is used:
// the endpoint returns a bare JSON array of events.
type EventsResponse []Event
internal/retry/retry.go
@@ -0,0 +1,68 @@
+// Package retry provides exponential backoff with jitter.
+package retry
+
+import (
+	"context"
+	"errors"
+	"math/rand/v2"
+	"time"
+)
+
+// Config holds retry configuration.
+type Config struct {
+	MaxAttempts int
+	BaseDelay   time.Duration
+	MaxDelay    time.Duration
+	Multiplier  float64
+}
+
+// DefaultConfig returns a Config with sensible defaults.
+func DefaultConfig() Config {
+	return Config{
+		MaxAttempts: 3,
+		BaseDelay:   time.Second,
+		MaxDelay:    30 * time.Second,
+		Multiplier:  2.0,
+	}
+}
+
+// ErrMaxAttemptsExceeded indicates all retry attempts failed.
+var ErrMaxAttemptsExceeded = errors.New("max retry attempts exceeded")
+
+// Do executes fn with exponential backoff until it succeeds or max attempts reached.
+// The function should return nil on success, or an error to trigger a retry.
+// Context cancellation stops retries immediately.
+func Do(ctx context.Context, cfg Config, fn func() error) error {
+	var lastErr error
+	delay := cfg.BaseDelay
+
+	for attempt := 1; attempt <= cfg.MaxAttempts; attempt++ {
+		lastErr = fn()
+		if lastErr == nil {
+			return nil
+		}
+
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+
+		if attempt == cfg.MaxAttempts {
+			break
+		}
+
+		// Add jitter: 75% to 125% of delay
+		jitter := 0.75 + rand.Float64()*0.5
+		sleepDuration := time.Duration(float64(delay) * jitter)
+
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(sleepDuration):
+		}
+
+		// Increase delay for next attempt
+		delay = min(time.Duration(float64(delay)*cfg.Multiplier), cfg.MaxDelay)
+	}
+
+	return errors.Join(ErrMaxAttemptsExceeded, lastErr)
+}
internal/retry/retry_test.go
@@ -0,0 +1,110 @@
+package retry
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+)
+
+func TestDo_Success(t *testing.T) {
+	cfg := Config{
+		MaxAttempts: 3,
+		BaseDelay:   time.Millisecond,
+		MaxDelay:    10 * time.Millisecond,
+		Multiplier:  2.0,
+	}
+
+	calls := 0
+	err := Do(context.Background(), cfg, func() error {
+		calls++
+		return nil
+	})
+
+	if err != nil {
+		t.Errorf("expected nil error, got %v", err)
+	}
+	if calls != 1 {
+		t.Errorf("expected 1 call, got %d", calls)
+	}
+}
+
+func TestDo_RetryThenSuccess(t *testing.T) {
+	cfg := Config{
+		MaxAttempts: 3,
+		BaseDelay:   time.Millisecond,
+		MaxDelay:    10 * time.Millisecond,
+		Multiplier:  2.0,
+	}
+
+	calls := 0
+	err := Do(context.Background(), cfg, func() error {
+		calls++
+		if calls < 3 {
+			return errors.New("temporary error")
+		}
+		return nil
+	})
+
+	if err != nil {
+		t.Errorf("expected nil error, got %v", err)
+	}
+	if calls != 3 {
+		t.Errorf("expected 3 calls, got %d", calls)
+	}
+}
+
+func TestDo_MaxAttemptsExceeded(t *testing.T) {
+	cfg := Config{
+		MaxAttempts: 2,
+		BaseDelay:   time.Millisecond,
+		MaxDelay:    10 * time.Millisecond,
+		Multiplier:  2.0,
+	}
+
+	testErr := errors.New("persistent error")
+	calls := 0
+	err := Do(context.Background(), cfg, func() error {
+		calls++
+		return testErr
+	})
+
+	if err == nil {
+		t.Error("expected error, got nil")
+	}
+	if !errors.Is(err, ErrMaxAttemptsExceeded) {
+		t.Errorf("expected ErrMaxAttemptsExceeded, got %v", err)
+	}
+	if !errors.Is(err, testErr) {
+		t.Errorf("expected wrapped testErr, got %v", err)
+	}
+	if calls != 2 {
+		t.Errorf("expected 2 calls, got %d", calls)
+	}
+}
+
func TestDo_ContextCancellation(t *testing.T) {
	// Long base delay ensures Do is still sleeping between attempts when
	// the context is cancelled ~10ms into the run.
	cfg := Config{
		MaxAttempts: 10,
		BaseDelay:   100 * time.Millisecond,
		MaxDelay:    time.Second,
		Multiplier:  2.0,
	}

	ctx, cancel := context.WithCancel(context.Background())
	calls := 0 // NOTE(review): incremented but never asserted — consider checking calls >= 1 or removing.

	// Cancel shortly after the first attempt has failed and Do has
	// entered its backoff sleep.
	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel()
	}()

	err := Do(ctx, cfg, func() error {
		calls++
		return errors.New("keep retrying")
	})

	// Do must surface the context error, not ErrMaxAttemptsExceeded.
	if !errors.Is(err, context.Canceled) {
		t.Errorf("expected context.Canceled, got %v", err)
	}
}
internal/sanitize/filename.go
@@ -0,0 +1,36 @@
+// Package sanitize provides utilities for creating filesystem-safe names.
+package sanitize
+
+import (
+	"strings"
+	"unicode"
+)
+
// forbiddenChars contains characters that are not allowed in filenames.
// This covers Windows (/ \ : * ? " < > |) and Unix (/) restrictions.
const forbiddenChars = `/\:*?"<>|`

// Filename removes or replaces characters that are unsafe for filenames.
// It replaces forbidden characters (and control characters) with underscores,
// collapses runs of replacements into a single underscore, and strips any
// leading or trailing mix of whitespace and underscores.
func Filename(name string) string {
	var b strings.Builder
	b.Grow(len(name))

	lastWasUnderscore := false
	for _, r := range name {
		if strings.ContainsRune(forbiddenChars, r) || unicode.IsControl(r) {
			// Collapse consecutive replacements into one underscore.
			if !lastWasUnderscore {
				b.WriteRune('_')
				lastWasUnderscore = true
			}
			continue
		}
		b.WriteRune(r)
		lastWasUnderscore = r == '_'
	}

	// Trim whitespace and underscores together. The previous two-pass
	// TrimSpace-then-Trim("_") left residue on interleaved edges, e.g.
	// "_ name" became " name" (leading space kept).
	return strings.TrimFunc(b.String(), func(r rune) bool {
		return r == '_' || unicode.IsSpace(r)
	})
}
internal/sanitize/filename_test.go
@@ -0,0 +1,36 @@
+package sanitize
+
+import "testing"
+
+func TestFilename(t *testing.T) {
+	tests := []struct {
+		input    string
+		expected string
+	}{
+		{"simple", "simple"},
+		{"with spaces", "with spaces"},
+		{"with/slash", "with_slash"},
+		{"with\\backslash", "with_backslash"},
+		{"with:colon", "with_colon"},
+		{"with*star", "with_star"},
+		{"with?question", "with_question"},
+		{`with"quote`, "with_quote"},
+		{"with<less", "with_less"},
+		{"with>greater", "with_greater"},
+		{"with|pipe", "with_pipe"},
+		{"multiple///slashes", "multiple_slashes"},
+		{"  leading spaces", "leading spaces"},
+		{"trailing spaces  ", "trailing spaces"},
+		{"___leading_underscores", "leading_underscores"},
+		{"trailing_underscores___", "trailing_underscores"},
+		{"Camera:Front/Back", "Camera_Front_Back"},
+		{"", ""},
+	}
+
+	for _, tc := range tests {
+		result := Filename(tc.input)
+		if result != tc.expected {
+			t.Errorf("Filename(%q) = %q, want %q", tc.input, result, tc.expected)
+		}
+	}
+}
internal/workerpool/pool.go
@@ -0,0 +1,136 @@
+// Package workerpool provides a bounded concurrent worker pool.
+package workerpool
+
+import (
+	"context"
+	"sync"
+)
+
+// Task represents a unit of work.
+type Task func(ctx context.Context) error
+
+// Result holds the outcome of a task.
+type Result struct {
+	Index int
+	Error error
+}
+
+// Pool executes tasks with bounded concurrency.
+type Pool struct {
+	workers int
+}
+
+// New creates a pool with the specified number of workers.
+func New(workers int) *Pool {
+	if workers < 1 {
+		workers = 1
+	}
+	return &Pool{workers: workers}
+}
+
+// Run executes all tasks with bounded concurrency.
+// Returns results in task order (not completion order).
+// Continues processing remaining tasks even if some fail.
+func (p *Pool) Run(ctx context.Context, tasks []Task) []Result {
+	results := make([]Result, len(tasks))
+
+	if len(tasks) == 0 {
+		return results
+	}
+
+	taskCh := make(chan int, len(tasks))
+	resultCh := make(chan Result, len(tasks))
+
+	var wg sync.WaitGroup
+
+	// Start workers
+	for i := 0; i < p.workers; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for idx := range taskCh {
+				select {
+				case <-ctx.Done():
+					resultCh <- Result{Index: idx, Error: ctx.Err()}
+				default:
+					err := tasks[idx](ctx)
+					resultCh <- Result{Index: idx, Error: err}
+				}
+			}
+		}()
+	}
+
+	// Send tasks
+	for i := range tasks {
+		taskCh <- i
+	}
+	close(taskCh)
+
+	// Wait for completion in background
+	go func() {
+		wg.Wait()
+		close(resultCh)
+	}()
+
+	// Collect results
+	for result := range resultCh {
+		results[result.Index] = result
+	}
+
+	return results
+}
+
+// RunWithProgress executes tasks and calls progress after each completion.
+func (p *Pool) RunWithProgress(ctx context.Context, tasks []Task, progress func(completed, total int)) []Result {
+	results := make([]Result, len(tasks))
+
+	if len(tasks) == 0 {
+		return results
+	}
+
+	taskCh := make(chan int, len(tasks))
+	resultCh := make(chan Result, len(tasks))
+
+	var wg sync.WaitGroup
+
+	// Start workers
+	for i := 0; i < p.workers; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for idx := range taskCh {
+				select {
+				case <-ctx.Done():
+					resultCh <- Result{Index: idx, Error: ctx.Err()}
+				default:
+					err := tasks[idx](ctx)
+					resultCh <- Result{Index: idx, Error: err}
+				}
+			}
+		}()
+	}
+
+	// Send tasks
+	for i := range tasks {
+		taskCh <- i
+	}
+	close(taskCh)
+
+	// Wait for completion in background
+	go func() {
+		wg.Wait()
+		close(resultCh)
+	}()
+
+	// Collect results with progress
+	completed := 0
+	for result := range resultCh {
+		results[result.Index] = result
+		completed++
+		if progress != nil {
+			progress(completed, len(tasks))
+		}
+	}
+
+	return results
+}
internal/workerpool/pool_test.go
@@ -0,0 +1,106 @@
+package workerpool
+
+import (
+	"context"
+	"errors"
+	"sync/atomic"
+	"testing"
+	"time"
+)
+
+func TestPool_Run(t *testing.T) {
+	pool := New(2)
+
+	var count atomic.Int32
+	tasks := make([]Task, 10)
+	for i := range tasks {
+		tasks[i] = func(ctx context.Context) error {
+			count.Add(1)
+			return nil
+		}
+	}
+
+	results := pool.Run(context.Background(), tasks)
+
+	if len(results) != 10 {
+		t.Errorf("expected 10 results, got %d", len(results))
+	}
+	if count.Load() != 10 {
+		t.Errorf("expected 10 task executions, got %d", count.Load())
+	}
+	for i, r := range results {
+		if r.Error != nil {
+			t.Errorf("task %d unexpected error: %v", i, r.Error)
+		}
+	}
+}
+
+func TestPool_RunWithErrors(t *testing.T) {
+	pool := New(2)
+
+	testErr := errors.New("test error")
+	tasks := []Task{
+		func(ctx context.Context) error { return nil },
+		func(ctx context.Context) error { return testErr },
+		func(ctx context.Context) error { return nil },
+	}
+
+	results := pool.Run(context.Background(), tasks)
+
+	if results[0].Error != nil {
+		t.Error("task 0 should succeed")
+	}
+	if !errors.Is(results[1].Error, testErr) {
+		t.Errorf("task 1 expected testErr, got %v", results[1].Error)
+	}
+	if results[2].Error != nil {
+		t.Error("task 2 should succeed")
+	}
+}
+
func TestPool_ContextCancellation(t *testing.T) {
	// A single worker serializes the two tasks, so cancelling while the
	// first task sleeps means the worker should see a done context
	// before dequeuing the second task.
	pool := New(1)

	ctx, cancel := context.WithCancel(context.Background())

	started := make(chan struct{})
	tasks := []Task{
		func(ctx context.Context) error {
			close(started)
			time.Sleep(100 * time.Millisecond)
			return nil
		},
		func(ctx context.Context) error {
			return nil
		},
	}

	// Cancel as soon as the first task has started executing.
	go func() {
		<-started
		cancel()
	}()

	results := pool.Run(ctx, tasks)

	// At least one task should have context error
	// NOTE(review): this test can never fail — the !hasCtxErr branch only
	// logs. Consider asserting once cancellation timing is deterministic.
	hasCtxErr := false
	for _, r := range results {
		if errors.Is(r.Error, context.Canceled) {
			hasCtxErr = true
			break
		}
	}
	if !hasCtxErr {
		// May complete before cancellation - that's OK
		t.Log("all tasks completed before cancellation")
	}
}
+
+func TestPool_EmptyTasks(t *testing.T) {
+	pool := New(4)
+	results := pool.Run(context.Background(), nil)
+
+	if len(results) != 0 {
+		t.Errorf("expected 0 results, got %d", len(results))
+	}
+}
.gitignore
@@ -0,0 +1,2 @@
+/out
+upvs
go.mod
@@ -0,0 +1,10 @@
+module github.com/crash/upvs
+
+go 1.23
+
+require github.com/spf13/cobra v1.8.1
+
+require (
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+)
go.sum
@@ -0,0 +1,10 @@
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
Makefile
@@ -0,0 +1,22 @@
.PHONY: build test clean lint fmt

BINARY := upvs
CMD := ./cmd/upvs

# Compile the CLI binary into the repository root (default goal).
build:
	go build -o $(BINARY) $(CMD)

# Run the full test suite.
test:
	go test ./...

# Remove the built binary and cached build artifacts.
clean:
	rm -f $(BINARY)
	go clean

# Static analysis via go vet.
lint:
	go vet ./...

# Format all Go source in place.
fmt:
	go fmt ./...

.DEFAULT_GOAL := build
README.md
@@ -1,3 +1,47 @@
-# `upvs`
+# upvs
 
-UniFi Protect Video Summary (`upvs`)
+UniFi Protect Video Summary - Create ~10-minute timelapses from a day's recordings.
+
+## Quick Start
+
+```bash
+# Build
+go build ./cmd/upvs
+
+# Set credentials (or use --api-key flag)
+export UPVS_HOST=https://192.168.1.1
+export UPVS_API_KEY=your-api-key-here
+
+# Run full pipeline for a single day
+./upvs run --camera "Front Door" --date 2024-01-15 --out ./output
+
+# Or run phases separately
+./upvs scan   --camera "Front Door" --date 2024-01-15 --out ./output
+./upvs fetch  --camera "Front Door" --date 2024-01-15 --out ./output --workers 4
+./upvs render --camera "Front Door" --date 2024-01-15 --out ./output
+```
+
+## API Key
+
+Generate an API key in UniFi OS: **Settings > Control Plane > Integrations**
+
+Pass via:
+- `--api-key` flag
+- `--api-key-file` flag (path to file containing key)
+- `UPVS_API_KEY` environment variable
+
+## Output
+
+```
+output/
+├── timelapse/Front_Door_2024-01-15_timelapse.mp4  # Final video
+├── clips/Front_Door/2024-01-15/*.mp4              # Downloaded clips
+├── manifests/clip_index.json                       # Event tracking
+└── metadata/day.json                               # Speed calculation
+```
+
+## Requirements
+
+- Go 1.23+
+- FFmpeg (in PATH)
+- UniFi Protect with API key access
SPEC.md
@@ -1,491 +1,518 @@
-# UniFi Protect → Single-Day Timelapse (Gap-Skipping)
+# UniFi Protect Single-Day Timelapse Specification
 
 ## 1. Purpose
 
 Build a deterministic, resumable pipeline that:
 
-1. Downloads **all UniFi Protect recording clips** for **one known camera** and **one specific day**.
-2. Produces **one timelapse MP4** for that day by:
-
-   * concatenating clips in chronological order (gaps skipped implicitly)
-   * speeding up playback so the final video is **~10 minutes**
-3. Relies on the camera’s **existing burned-in timestamps** (no additional overlay).
+1. Downloads all motion/smart detection event clips for one camera and one day
+2. Produces one timelapse MP4 by concatenating clips in chronological order and speeding up playback so the final video runs ~10 minutes
+3. Relies on the camera's existing burned-in timestamps (no additional overlay)
 
 Non-goals: multi-camera, multi-day batching, motion-only exports, UI automation, cloud uploading.
 
 ---
 
-## 2. Inputs
-
-### Required
-
-* `protect_host` — base URL, e.g. `https://unvr.local`
-* `username`, `password` — Protect admin credentials (may be provided via env/secret file)
-* `camera_selector` — camera ID **or** camera name (must resolve uniquely)
-* `date` — target calendar day in UTC: `YYYY-MM-DD`
-* `out_dir` — output root directory
-
-### Optional (defaults shown)
-
-* `target_duration` — `10m` (600 seconds)
-* `output_fps` — `30`
-* `video_codec` — `h264`
-* `crf` — `23`
-* `preset` — `medium`
-* `tls_insecure` — `false` (skip TLS verification only if explicitly enabled)
-* `max_workers` — `3` (download concurrency)
-* `retry_limit` — `5`
-* `retry_backoff` — exponential, capped
-
----
-
-## 3. Outputs
-
-### Primary
-
-* `timelapse/<camera-name>_<YYYY-MM-DD>_timelapse.mp4`
-
-### Secondary (auditing + resuming)
+## 2. Authentication
 
-* `metadata/camera.json`
-* `metadata/day.json`
-* `manifests/clip_index.json`
-* `manifests/concat.txt`
-* `clips/<camera-name>/<YYYY-MM-DD>/clip_<start>_<end>_<eventId>.mp4`
-* `logs/run.log`
+### API Key Authentication (Recommended)
 
----
+UniFi Protect supports API key authentication for programmatic access:
 
-## 4. Deterministic Directory Layout
+- Keys are generated in UniFi OS: **Settings > Control Plane > Integrations**
+- Pass the key via `X-API-Key` HTTP header on all requests
+- No session management, cookies, or login flow required
+- Stateless - each request is independently authenticated
 
 ```
-out/
-  metadata/
-    camera.json
-    day.json
-  clips/
-    <camera-name>/
-      YYYY-MM-DD/
-        clip_<startISO>_<endISO>_<eventId>.mp4
-        ...
-  manifests/
-    clip_index.json
-    concat.txt
-  timelapse/
-    <camera-name>_<YYYY-MM-DD>_timelapse.mp4
-  logs/
-    run.log
+GET /proxy/protect/api/bootstrap
+X-API-Key: <your-api-key>
 ```
 
-### Naming rules
-
-* `<camera-name>` is sanitized for filesystem safety (remove/replace `/ \ : * ? " < > |`, trim whitespace).
-* `<startISO>` and `<endISO>` are UTC timestamps formatted as `YYYYMMDDTHHMMSSZ`.
-* Output filenames must be stable across reruns given the same inputs.
-
----
-
-## 5. Time Semantics
-
-### Day window (UTC)
+### Implementation Notes
 
-For input `date = YYYY-MM-DD`:
+- API keys provide full access to the Protect API
+- Keys do not expire but can be revoked in the UI
+- TLS verification should be enabled; use `--tls-insecure` only for self-signed certs
+- Never log or persist the API key
 
-* `day_start = YYYY-MM-DDT00:00:00.000Z`
-* `day_end   = YYYY-MM-DDT23:59:59.999Z`
+### Documentation References
 
-Internally, times are stored as:
-
-* epoch milliseconds (`*_ms`) for Protect API interaction
-* RFC3339 UTC strings for human-facing metadata/artifacts
+- **Official Getting Started:** https://developer.ui.com/protect/v6.2.83/gettingstarted
+- **Ubiquiti Help Center:** https://help.ui.com/hc/en-us/articles/30076656117655-Getting-Started-with-the-Official-UniFi-API
 
 ---
 
-## 6. UniFi Protect API Interaction
+## 3. Inputs
 
-### 6.1 Authentication
+### Required
 
-* Login via Protect API, capture session cookies.
-* All subsequent requests must include cookies.
-* Session expiry must be detected and retried via re-login.
+| Input | Description |
+|-------|-------------|
+| `host` | UniFi Protect URL, e.g. `https://192.168.1.1` |
+| `api_key` | API key from UniFi OS |
+| `camera` | Camera ID or name (must resolve uniquely) |
+| `date` | Target day: `YYYY-MM-DD` |
+| `out_dir` | Output root directory |
 
-**Behavior requirements**
+### Optional (defaults shown)
 
-* If any API call returns an auth failure (401/403), attempt re-login once and retry the call.
-* If re-login fails, abort.
+| Input | Default | Description |
+|-------|---------|-------------|
+| `target_duration` | `600` | Target output seconds (~10 min) |
+| `output_fps` | `30` | Output frame rate |
+| `crf` | `23` | FFmpeg quality (0-51, lower=better) |
+| `preset` | `medium` | FFmpeg encoding preset |
+| `tls_insecure` | `false` | Skip TLS verification |
+| `max_workers` | `4` | Download concurrency |
+| `retry_limit` | `3` | Retries per download |
 
-### 6.2 Bootstrap (camera resolution)
+---
 
-Fetch bootstrap and resolve camera:
+## 4. Output Structure
 
-* If `camera_selector` is an ID, match directly.
-* If it is a name, match by exact name; fail if 0 or >1 matches.
+```
+out/
+├── metadata/
+│   ├── camera.json        # Resolved camera info
+│   └── day.json           # Speed calculation, clip counts
+├── clips/<camera>/<date>/
+│   └── clip_<start>_<end>_<eventId>.mp4
+├── manifests/
+│   ├── clip_index.json    # All events with download status
+│   └── concat.txt         # FFmpeg concat demuxer file
+├── timelapse/
+│   └── <camera>_<date>_timelapse.mp4
+└── logs/
+    └── run.log
+```
 
-Persist resolved camera metadata to `metadata/camera.json` including at minimum:
+### Naming Rules
 
-* camera_id
-* camera_name
-* model (if available)
-* mac (if available)
-* host
-* resolution/fps (if available; informational)
+- Camera name is sanitized: remove `/ \ : * ? " < > |`, collapse underscores, trim
+- Timestamps use milliseconds for API compatibility
+- Filenames must be stable across reruns
 
 ---
 
-## 7. Phase A: Enumerate Recording Events for the Day
-
-### Goal
-
-Build a complete ordered list of recording events for the camera within the day window.
-
-### Requirements
+## 5. UniFi Protect API
+
+> **Documentation:** The official API reference is at https://developer.ui.com/protect/.
+> Additional implementation details have been documented by community projects including
+> [unifi-protect](https://github.com/hjdhjd/unifi-protect) and
+> [pyunifiprotect](https://github.com/AngellusMortis/pyunifiprotect).
+
+### Base URL
+
+All endpoints are prefixed with `/proxy/protect/api/` when accessing via UniFi OS.
+
+### 5.1 Bootstrap
+
+**Endpoint:** `GET /proxy/protect/api/bootstrap`
+
+Returns system information including all cameras.
+
+**Response structure:**
+```json
+{
+  "nvr": {
+    "id": "string",
+    "name": "string",
+    "version": "string"
+  },
+  "cameras": [
+    {
+      "id": "string",
+      "name": "string",
+      "type": "string",
+      "state": "string",
+      "isConnected": true,
+      "host": "string",
+      "mac": "string",
+      "modelKey": "string",
+      "channels": [
+        {
+          "id": 0,
+          "width": 1920,
+          "height": 1080,
+          "enabled": true,
+          "fps": 30
+        }
+      ]
+    }
+  ]
+}
+```
 
-* Query Protect events filtered to:
+**Camera Resolution:**
+1. If `camera` input matches a camera ID exactly, use that camera
+2. Otherwise, search by name (case-insensitive)
+3. Fail if zero matches or multiple matches
+
+### 5.2 Events
+
+**Endpoint:** `GET /proxy/protect/api/events`
+
+**Query Parameters:**
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `cameras` | string | Camera ID |
+| `start` | int64 | Start time (Unix ms) |
+| `end` | int64 | End time (Unix ms) |
+| `types` | string[] | Event types: `motion`, `smartDetect` |
+| `limit` | int | Max results per page (default 100) |
+| `orderDirection` | string | `ASC` or `DESC` |
+| `after` | string | Pagination cursor (last event ID) |
+
+**Response:** Array of events
+```json
+[
+  {
+    "id": "string",
+    "type": "motion",
+    "start": 1705334400000,
+    "end": 1705334460000,
+    "score": 85,
+    "camera": "camera-id",
+    "smartDetectTypes": ["person", "vehicle"]
+  }
+]
+```
 
-  * `type = recording`
-  * `cameraId = resolved camera_id`
-  * `start` and `end` time bounds covering the day
-* Pagination must be supported until exhaustion.
+**Pagination:**
+- Request with `limit=100` and `orderDirection=ASC`
+- If response contains `limit` events, fetch next page using `after=<last-event-id>`
+- Continue until fewer than `limit` events returned
 
-### Event fields to record (per event)
+**Day Window:**
+- For date `2024-01-15`, query:
+  - `start`: midnight local time as Unix ms
+  - `end`: midnight + 24 hours as Unix ms
 
-* `event_id`
-* `camera_id`
-* `start_ms`
-* `end_ms`
-* `duration_ms = end_ms - start_ms` (non-negative; if negative, mark as invalid)
-* `sequence` (ordering index after sorting)
+### 5.3 Video Export
 
-Persist full index to `manifests/clip_index.json`.
+> See also: [How the API for video downloads works](https://github.com/danielfernau/unifi-protect-video-downloader/wiki/How-the-API-for-video-downloads-works-(simplified))
 
-### Sorting
+**Endpoint:** `GET /proxy/protect/api/video/export`
 
-* Sort events by `(start_ms, end_ms, event_id)` ascending.
-* Assign `sequence = 0..N-1` after sorting.
+**Query Parameters:**
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `camera` | string | Camera ID |
+| `start` | int64 | Start time (Unix ms) |
+| `end` | int64 | End time (Unix ms) |
 
-### Validation
+**Response:** Binary MP4 stream
 
-* If an event is missing a valid `start_ms` or `end_ms`, record it with status `invalid` and exclude it from download/concat.
+**Important:** Use the camera ID, not event ID, for downloads. The start/end times come from the event.
 
 ---
 
-## 8. Phase B: Download Clips (Original MP4)
-
-### Goal
-
-Download one MP4 per valid event into the deterministic clip library.
-
-### Requirements
-
-* Download endpoint must yield the event’s MP4.
-* Stream download to a temporary path:
+## 6. Pipeline Phases
+
+### Phase A: Scan (Enumerate Events)
+
+1. Resolve camera by ID or name via bootstrap
+2. Query events for the day with pagination
+3. Build clip index with all events
+4. Sort by `(start_ms, end_ms, event_id)` for deterministic ordering
+5. Write `manifests/clip_index.json`
+
+**Clip Index Entry:**
+```json
+{
+  "event_id": "string",
+  "start_ms": 1705334400000,
+  "end_ms": 1705334460000,
+  "duration_ms": 60000,
+  "event_type": "motion",
+  "smart_types": ["person"],
+  "score": 85,
+  "status": "pending",
+  "file_path": "clips/camera/2024-01-15/clip_xxx.mp4"
+}
+```
 
-  * `...mp4.partial`
-* On success:
+### Phase B: Fetch (Download Clips)
 
-  * fsync/close
-  * atomic rename to final `.mp4`
-* If final `.mp4` already exists and is non-empty:
+1. Read clip index
+2. For each pending clip:
+   - Skip if final `.mp4` exists and is non-empty
+   - Delete any existing `.partial` file
+   - Download to `.partial` file
+   - fsync and close
+   - Atomic rename to final `.mp4`
+3. Update clip index with status
 
-  * skip download
-* If `.partial` exists:
+**Concurrency:**
+- Use bounded worker pool (default 4 workers)
+- Each worker processes clips independently
 
-  * delete and retry download (simpler, deterministic)
+**Retry:**
+- Exponential backoff with jitter: base 1s, max 30s, multiplier 2x
+- Jitter: 75-125% of delay
+- Max attempts configurable (default 3)
 
-### Concurrency
+**Status Values:**
+- `pending`: Not yet attempted
+- `in_progress`: Currently downloading
+- `complete`: Successfully downloaded
+- `failed`: All retries exhausted
 
-* Use a worker pool of size `max_workers` for downloads.
-* Workers must be bounded to avoid overloading Protect.
+### Phase C: Build Manifest
 
-### Retries
+1. Read clip index
+2. Filter to `complete` status clips
+3. Verify each file exists and is valid (optional: ffprobe check)
+4. Write `manifests/concat.txt` in FFmpeg format:
 
-* Retry transient failures up to `retry_limit` with exponential backoff:
+```
+file '/absolute/path/to/clip1.mp4'
+file '/absolute/path/to/clip2.mp4'
+```
 
-  * include network errors, 5xx, timeouts
-* Do not retry deterministic failures more than once:
+**Path Escaping:** Single quotes in paths must be escaped as `'\''`
 
-  * 404 on event download (record as missing)
-* On auth failures:
+### Phase D: Compute Speed
 
-  * re-login and retry once (see Authentication)
+**Formula:**
+```
+speed = total_recorded_seconds / target_output_seconds
+speed = clamp(speed, 1.0, 2000.0)
+```
 
-### Recording status
+**Example:**
+- 2 hours of footage (7200s) targeting 10 minutes (600s)
+- Speed = 7200 / 600 = 12x
+
+**Edge Cases:**
+- Less footage than target: speed = 1.0 (no slowdown)
+- Extremely long footage: clamp to 2000x max
+
+Write results to `metadata/day.json`:
+```json
+{
+  "camera_id": "string",
+  "camera_name": "string",
+  "date": "2024-01-15",
+  "total_clips": 150,
+  "valid_clips": 148,
+  "total_duration_secs": 7200.0,
+  "speed_factor": 12.0,
+  "target_secs": 600,
+  "output_duration_secs": 600.0
+}
+```
 
-Update `manifests/clip_index.json` to reflect:
+### Phase E: Render (FFmpeg)
+
+**Command:**
+```bash
+ffmpeg -f concat -safe 0 -i concat.txt \
+  -vf "setpts=PTS/<speed>" \
+  -r 30 \
+  -c:v libx264 \
+  -crf 23 \
+  -preset medium \
+  -pix_fmt yuv420p \
+  -an \
+  -y \
+  output.mp4
+```
 
-* downloaded: true/false
-* local_path
-* bytes (if known)
-* last_error (if failed)
+**Filter Explanation:**
+- `setpts=PTS/N` divides presentation timestamps by N, making video N times faster
+- Example: `setpts=PTS/12` plays at 12x speed
 
-(Implementation may also maintain a small local DB; this spec only requires the manifest be correct.)
+**Output Settings:**
+- `-r 30`: Constant 30 FPS output
+- `-c:v libx264`: H.264 codec
+- `-crf 23`: Quality (0=lossless, 51=worst)
+- `-preset medium`: Encoding speed/quality tradeoff
+- `-pix_fmt yuv420p`: Compatibility
+- `-an`: No audio
 
 ---
 
-## 9. Phase C: Build Concatenation Manifest (Gap-Skipping)
-
-### Goal
-
-Create a concat list that plays only recorded footage, in order, skipping gaps naturally.
+## 7. Resumability
 
-### Requirements
+The pipeline is designed to be restartable:
 
-* Include only clips that:
+1. **Scan:** Always re-enumerates events (fast operation)
+2. **Fetch:** Skips clips where final `.mp4` exists and is non-empty
+3. **Render:** Regenerates concat.txt and re-renders (clips are preserved)
 
-  * are marked valid
-  * were successfully downloaded
-  * exist on disk and are non-empty
-* Ordered strictly by event sort order.
-
-### Output
-
-Write `manifests/concat.txt` in FFmpeg concat demuxer format:
-
-* One entry per clip, absolute or out_dir-relative paths (choose one and be consistent).
-* Must be safe for special characters (prefer quoting/escaping per FFmpeg concat file rules).
-
-### Validation
-
-* If zero clips are available, abort timelapse step with a clear message and preserve manifests/logs.
+**Partial Downloads:**
+- `.partial` files indicate interrupted downloads
+- Delete before retry to ensure clean state
+- Never trust partial file contents
 
 ---
 
-## 10. Phase D: Compute Speed to Target ~10 Minutes
-
-### Goal
+## 8. Error Handling
 
-Determine a single speed factor so the concatenated footage becomes approximately `target_duration` (default 600s).
+### HTTP Errors
 
-### Definitions
+| Status | Action |
+|--------|--------|
+| 401/403 | Fail immediately - invalid API key |
+| 404 | Mark clip as failed, continue with others |
+| 429 | Retry with backoff |
+| 5xx | Retry with backoff |
 
-* `target_output_seconds = 600` (from `target_duration`)
-* `clip_duration_seconds_i` — preferred from event metadata:
+### Sentinel Errors
 
-  * `duration_ms / 1000`
-* `total_recorded_seconds = Σ clip_duration_seconds_i` over included clips
+Define inspectable errors for programmatic handling:
+- `ErrUnauthorized`: Invalid API key
+- `ErrCameraNotFound`: Camera doesn't exist
+- `ErrMultipleCameras`: Ambiguous camera name
+- `ErrMaxAttemptsExceeded`: Retries exhausted
 
-### Rules
+### Zero-Data Day
 
-1. If `total_recorded_seconds <= target_output_seconds`:
-
-   * `speed = 1.0`
-2. Else:
-
-   * `speed = total_recorded_seconds / target_output_seconds`
-
-### Clamping (recommended)
-
-* `min_speed = 1.0`
-* `max_speed = 2000.0`
-* `speed = clamp(speed, min_speed, max_speed)`
-
-### Rounding
-
-* Round `speed` to 2 decimal places for stability:
-
-  * e.g. `123.46`
-
-### Persist
-
-Write `metadata/day.json` including:
-
-* date
-* day_start, day_end (RFC3339 UTC)
-* clip_count_total
-* clip_count_included
-* total_recorded_seconds
-* target_output_seconds
-* computed_speed (pre-clamp)
-* clamped_speed (final)
-* expected_output_seconds = total_recorded_seconds / clamped_speed
+If no events found:
+- Write empty clip index
+- Skip fetch and render phases
+- Exit cleanly with message
 
 ---
 
-## 11. Phase E: Generate Timelapse with FFmpeg (Concat + Speed-Up)
-
-### Goal
-
-Produce a single MP4 timelapse that:
-
-* plays clips sequentially
-* skips gaps implicitly
-* accelerates video to meet the 10-minute target
-* preserves burned-in timestamps already present in frames
-* uses a constant output framerate for smooth playback
-
-### Requirements
-
-* Timelapse must be **re-encoded** (filters require it).
-* Output must be placed in:
-
-  * `timelapse/<camera-name>_<YYYY-MM-DD>_timelapse.mp4`
-
-### Video filter rule
-
-Apply playback speed-up via:
-
-* `setpts=PTS/<speed>`
-
-Where `<speed>` is the final computed clamped speed from `metadata/day.json`.
-
-### Output characteristics (defaults)
-
-* Codec: H.264
-* FPS: 30
-* Pixel format: yuv420p
-* Quality: CRF 23, preset medium
-* Audio: dropped/disabled (unless Protect clips include meaningful audio and user opts in; default is no audio)
-
-### VFR handling
-
-Because Protect clips may be variable framerate:
-
-* The pipeline must normalize the output to a constant FPS (`output_fps`).
-* Minor boundary stutter is acceptable; major artifacts should be flagged in validation.
-
-### Intermediate files
+## 9. CLI Interface
 
-Intermediate “full concatenated” file is optional.
-If created, it must be stored under `timelapse/` and named deterministically, but the pipeline must be able to run without it.
+```
+upvs [global flags] <command>
+
+Global Flags:
+  --host           UniFi Protect URL (env: UPVS_HOST)
+  --api-key        API key (env: UPVS_API_KEY)
+  --api-key-file   Path to file containing API key
+  --camera         Camera ID or name
+  --out            Output directory
+  --tls-insecure   Skip TLS verification
+  --verbose        Enable debug logging
+
+Commands:
+  scan    Enumerate events for a day
+  fetch   Download clips from clip index
+  render  Generate timelapse from downloaded clips
+  run     Full pipeline: scan + fetch + render
+
+Scan/Fetch/Render/Run Flags:
+  --date           Target date YYYY-MM-DD (required)
+
+Fetch Flags:
+  --workers        Download concurrency (default 4)
+  --retries        Retry attempts per clip (default 3)
+
+Render Flags:
+  --target         Target duration seconds (default 600)
+  --fps            Output frame rate (default 30)
+  --crf            FFmpeg CRF quality (default 23)
+  --preset         FFmpeg preset (default "medium")
+```
 
 ---
 
-## 12. Verification & Acceptance Criteria
-
-### Clip verification
-
-* All included clips exist and are non-empty.
-* Number of downloaded clips matches number of included events (unless failures recorded).
-
-### Ordering verification
-
-* `concat.txt` ordering matches ascending `(start_ms, end_ms, event_id)`.
-
-### Timelapse verification
+## 10. Verification
 
-* Output file exists and is non-empty.
-* Output duration:
+### Automated Checks
 
-  * If `total_recorded_seconds >= target_output_seconds`:
+1. All included clips exist and are non-empty
+2. Clip count matches events (minus failures)
+3. concat.txt ordering matches sorted events
+4. Output file exists and is non-empty
 
-    * duration is approximately `target_output_seconds` (tolerance ± 10 seconds)
-  * Else:
+### Duration Verification
 
-    * duration approximately `total_recorded_seconds` (tolerance ± 10 seconds)
-* Visual spot-check guidance (manual):
+- If total >= target: output duration ≈ target (±10s tolerance)
+- If total < target: output duration ≈ total (±10s tolerance)
 
-  * Start: first clip boundary looks correct
-  * Middle: timestamps progress (with jumps at gaps)
-  * End: final clip plays and ends cleanly
+### Manual Spot-Check
 
-### Auditability
-
-* `metadata/day.json` must allow a reviewer to understand:
-
-  * what clips were included
-  * total recorded time
-  * why the timelapse is sped up by the given factor
+- Start: First clip boundary correct
+- Middle: Timestamps progress (with gaps)
+- End: Final clip ends cleanly
 
 ---
 
-## 13. Failure & Recovery Model
-
-### General
-
-The pipeline is restartable and must not require cleanup to resume.
-
-### Resume rules
-
-* Existing valid clip MP4s are never re-downloaded.
-* A failed download remains listed in `clip_index.json` with `last_error`.
-* Rerun attempts failed downloads again up to retry rules, unless explicitly disabled.
+## 11. Security
 
-### Partial downloads
-
-* `.partial` files are treated as incomplete and are deleted before retry.
-
-### Zero-data day
-
-If enumeration returns no valid downloadable events:
-
-* Do not attempt timelapse generation.
-* Produce `metadata/*` and manifests as usual.
-* Exit with a clear “no clips available” result.
+- API key must not be logged or written to disk
+- Use `log/slog` structured logging (no credential fields)
+- TLS verification enabled by default
+- `--tls-insecure` must be explicitly set and logged as warning
 
 ---
 
-## 14. Logging
-
-### Requirements
-
-* Write a run log to `logs/run.log`.
-* Log must include:
+## 12. Dependencies
 
-  * resolved camera
-  * day window
-  * number of events enumerated
-  * number of clips downloaded/skipped/failed
-  * total recorded seconds
-  * computed speed
-  * ffmpeg invocation summary (arguments redacted if they contain secrets; no credentials logged)
-  * output path and resulting duration (if measured)
+- **CLI Framework:** github.com/spf13/cobra (flags, subcommands, env binding)
+- **HTTP:** Standard library `net/http`
+- **Logging:** Standard library `log/slog`
+- **FFmpeg:** External binary (must be in PATH)
 
 ---
 
-## 15. Security Considerations
+## 13. Implementation Notes
 
-* Credentials must not be written to disk by default.
-* Logs must not include passwords or session cookies.
-* If TLS verification is disabled, it must be an explicit user choice and logged as such.
+### Time Handling
 
----
-
-## 16. Configuration & CLI Contract
-
-### Core commands (conceptual)
+- Store times as Unix milliseconds (`int64`) for API compatibility
+- Use `time.UnixMilli()` for conversion
+- Parse dates with `time.Parse("2006-01-02", dateStr)`
 
-* `scan` — resolve camera, enumerate events, write `clip_index.json`
-* `fetch` — download clips listed in `clip_index.json`
-* `render` — create concat manifest, compute speed, run FFmpeg to generate timelapse
-* `run` — convenience: scan + fetch + render
+### Atomic File Operations
 
-### Minimum flags (conceptual)
+```go
+// Write to a temporary .partial file, then atomically rename into place.
+// Error handling is elided here for brevity; real code must check each error.
+f, _ := os.Create(path + ".partial")
+// ... write content ...
+f.Sync()  // flush to disk before rename so a crash cannot leave a truncated final file
+f.Close()
+// Atomic rename: the final path only ever appears as a complete file
+os.Rename(path+".partial", path)
+```
 
-* `--host`
-* `--user`
-* `--pass` or `--pass-file` or env-based equivalent
-* `--camera` (id or name)
-* `--date` (YYYY-MM-DD)
-* `--out`
+### Worker Pool Pattern
 
-Optional:
+- Fixed number of workers (goroutines)
+- Tasks sent via channel
+- Results collected via channel
+- Context cancellation stops workers
 
-* `--target 10m`
-* `--fps 30`
-* `--workers 3`
+### Error Wrapping (per uber-go/guide)
 
-(Exact CLI syntax is left to implementation, but these inputs must be supported.)
+```go
+// Separate call and check
+result, err := doSomething()
+if err != nil {
+    return fmt.Errorf("doing something (id=%s): %w", id, err)
+}
+```
 
 ---
 
-## 17. Out-of-Scope Extensions
+## 14. Out-of-Scope
 
-* Multi-camera, multi-day
-* Automatic daily scheduling
-* Cloud upload/sharing
-* Motion-only timelapse
-* Adding timestamp overlays (already present)
-* Smart scene detection / per-hour chapters
+- Multi-camera support
+- Multi-day batching
+- Automatic scheduling
+- Cloud upload
+- Motion-only filtering
+- Timestamp overlay (already burned in)
+- Smart scene detection
 
 ---
 
-## 18. Summary
+## 15. References
+
+### Official Documentation
 
-This spec defines a **simple and robust** one-day UniFi Protect timelapse workflow:
+- [UniFi Protect API Reference](https://developer.ui.com/protect/) - Official API documentation
+- [Getting Started Guide](https://developer.ui.com/protect/v6.2.83/gettingstarted) - API key setup and basics
+- [Ubiquiti Help Center - Official API](https://help.ui.com/hc/en-us/articles/30076656117655-Getting-Started-with-the-Official-UniFi-API) - Overview of UniFi APIs
 
-* Enumerate recording events for the day
-* Download each event MP4 deterministically
-* Concatenate clips in order (skipping gaps naturally)
-* Compute a speed factor so output is ~10 minutes
-* Re-encode with FFmpeg using `setpts` and constant FPS
+### Community Resources
 
+- [unifi-protect (Node.js)](https://github.com/hjdhjd/unifi-protect) - Complete TypeScript implementation
+- [pyunifiprotect (Python)](https://github.com/AngellusMortis/pyunifiprotect) - Python API client with CLI
+- [Video Download API Wiki](https://github.com/danielfernau/unifi-protect-video-downloader/wiki/How-the-API-for-video-downloads-works-%28simplified%29) - Video export endpoint details
STYLE.md
@@ -0,0 +1,72 @@
+---
+version: 2026-01-26
+---
+
+Go Style
+========
+
+This document captures local Go conventions. It is *normative* and *minimal*.  
+For everything not covered here, follow [uber-go/guide](https://github.com/uber-go/guide).
+
+Errors
+------
+
+### Err Checks
+
+- Do not write `if err := f(); err != nil { ... }`.
+- Separate call and check into two statements.
+
+### Wrapping
+
+- Wrap errors only when adding *new actionable context*.
+- Do not wrap with repetitive or duplicate information.
+- Use `%w` for wrapping.
+- Do not restate underlying identifiers the callee already logs.
+
+### Format
+
+- Wrapping message format:  
+  `doing X (id=..., name=...): %w`
+- Keep lowercase, no punctuation, no “failed to”.
+
+### Inspection
+
+- Use `errors.Is` and `errors.As` for inspection in our own code.
+- Define sentinel errors for inspectable cases: `var ErrFoo = errors.New(...)`.
+
+Logging
+-------
+
+- Use only `log/slog`.
+- Do not log an error then return it unchanged.
+- Use structured logs (attributes, not formatted strings).
+- Libraries may accept loggers via options.
+
+### Formatting
+
+When a log call has multiple attributes, place each on its own line:
+
+```go
+// Good
+slog.Info("recv",
+    slog.String("type", "SecurityTypes"),
+    slog.Int("count", int(b[0])),
+    slog.String("raw", fmt.Sprintf("%x", b)))
+
+// Bad - line too long
+slog.Info("recv", slog.String("type", "SecurityTypes"), slog.Int("count", int(b[0])), slog.String("raw", fmt.Sprintf("%x", b)))
+```
+
+Single-attribute calls may stay on one line if short:
+
+```go
+slog.Info("bell received")
+slog.Warn("watcher error", slog.String("error", err.Error()))
+```
+
+HTTP
+----
+
+- Do not use HTTP frameworks.
+- Use `net/http` directly.
+