Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions internal/security/scanner.go
Original file line number Diff line number Diff line change
Expand Up @@ -238,6 +238,16 @@ func (s *Scanner) ScanContainer(ctx context.Context, containerName, username str
// Wait briefly for device to be available
time.Sleep(2 * time.Second)

// Verify the mount has actual content — on ZFS backends, stopped containers
// or missing datasets result in an empty mount that clamdscan scans instantly,
// producing a false "clean" with 0s duration.
lsOut, _, _ := s.incusClient.ExecWithOutput(SecurityContainerName, []string{
"ls", mountPath,
})
if strings.TrimSpace(lsOut) == "" {
return fmt.Errorf("mount path %s is empty — container rootfs not accessible (container may be stopped or storage backend not mounted)", mountPath)
}

// Run clamdscan (uses the resident clamd daemon which keeps the virus DB in
// memory, avoiding the expensive DB reload that clamscan performs on each
// invocation). Directory exclusions are configured in clamd.conf ExcludePath.
Expand Down
17 changes: 15 additions & 2 deletions internal/security/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -358,10 +358,23 @@ type ScanJob struct {
CompletedAt *time.Time
}

// EnqueueScanJob inserts a new pending scan job and returns its ID
// EnqueueScanJob inserts a new pending scan job and returns its ID.
// If the container already has a pending or running job, it skips the insert
// and returns the existing job's ID to avoid queue bloat from repeated scan-all triggers.
func (s *Store) EnqueueScanJob(ctx context.Context, containerName, username string) (int64, error) {
var id int64
// Check for existing pending/running job for this container
var existingID int64
err := s.pool.QueryRow(ctx,
`SELECT id FROM scan_jobs WHERE container_name = $1 AND status IN ('pending', 'running') LIMIT 1`,
containerName,
).Scan(&existingID)
if err == nil {
// Already has an active job, skip
return existingID, nil
}

var id int64
err = s.pool.QueryRow(ctx,
`INSERT INTO scan_jobs (container_name, username) VALUES ($1, $2) RETURNING id`,
containerName, username,
).Scan(&id)
Expand Down
18 changes: 15 additions & 3 deletions internal/sentinel/keysync.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
"sort"
"strings"
"sync"
"time"
Expand Down Expand Up @@ -118,9 +119,15 @@ func (ks *KeyStore) Apply() error {
seen := make(map[string]bool)
var routes []userRoute

// Collect from all backends — iterate deterministically by sorting is not needed
// since sshpiper matches by username (order doesn't affect routing correctness)
for _, bk := range ks.backends {
// Collect from all backends — sort backend IDs for deterministic iteration
// so the generated config is stable and doesn't cause unnecessary sshpiper restarts.
backendIDs := make([]string, 0, len(ks.backends))
for id := range ks.backends {
backendIDs = append(backendIDs, id)
}
sort.Strings(backendIDs)
for _, id := range backendIDs {
bk := ks.backends[id]
for _, u := range bk.users {
if seen[u.Username] {
continue // first backend to claim a user wins
Expand All @@ -135,6 +142,11 @@ func (ks *KeyStore) Apply() error {
}
ks.mu.RUnlock()

// Sort routes by username for deterministic config output
sort.Slice(routes, func(i, j int) bool {
return routes[i].username < routes[j].username
})

if len(routes) == 0 {
return fmt.Errorf("no users to configure")
}
Expand Down
7 changes: 5 additions & 2 deletions internal/server/core_services.go
Original file line number Diff line number Diff line change
Expand Up @@ -655,6 +655,9 @@ name = grafana
user = %s
password = %s
ssl_mode = disable
max_open_conn = 5
max_idle_conn = 2
conn_max_lifetime = 14400

[security]
allow_embedding = true
Expand Down Expand Up @@ -1161,8 +1164,8 @@ func (cs *CoreServices) EnsureSecurity(ctx context.Context) error {
config := incus.ContainerConfig{
Name: CoreSecurityContainer,
Image: "images:ubuntu/24.04",
CPU: "2",
Memory: "2GB",
CPU: "4",
Memory: "3GB",
AutoStart: true,
Disk: &incus.DiskDevice{
Path: "/",
Expand Down
13 changes: 12 additions & 1 deletion internal/server/dual_server.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
"net"
"net/http"
"os"
"os/exec"
"strconv"
"strings"
"time"
Expand Down Expand Up @@ -272,7 +273,17 @@
log.Printf("Warning: Failed to setup Caddy: %v. Proxy features disabled.", err)
} else {
caddyAdminURL = adminURL
log.Printf("Caddy ready: %s", coreServices.GetCaddyIP())
caddyIP := coreServices.GetCaddyIP()
log.Printf("Caddy ready: %s", caddyIP)

// Add DNS override so containers resolve *.baseDomain to Caddy
// internally instead of going through the external IP (hairpin NAT).
dnsOverride := fmt.Sprintf("address=/%s/%s", config.BaseDomain, caddyIP)
if out, err := exec.Command("incus", "network", "set", "incusbr0", "raw.dnsmasq", dnsOverride).CombinedOutput(); err != nil {

[Code scanning / gosec — Check failure: "Subprocess launched with variable" (flagged on the exec.Command call above, whose arguments include the variable dnsOverride)]
log.Printf("Warning: failed to set DNS override for %s: %v (%s)", config.BaseDomain, err, string(out))
} else {
log.Printf("DNS override: *.%s -> %s (internal hairpin)", config.BaseDomain, caddyIP)
}
}
}
}
Expand Down
7 changes: 6 additions & 1 deletion internal/server/peer.go
Original file line number Diff line number Diff line change
Expand Up @@ -397,10 +397,15 @@ func (pc *PeerClient) ForwardCreateContainer(authToken string, pbReq *pb.CreateC
}

// ForwardRequest forwards an arbitrary HTTP request to the peer and returns the response body.
// GET requests use a 5s timeout to avoid blocking the UI; POST/PUT use 30s for mutations.
func (pc *PeerClient) ForwardRequest(method, path, authToken string, body []byte) ([]byte, int, error) {
url := fmt.Sprintf("http://%s%s", pc.Addr, path)

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
timeout := 5 * time.Second
if method != "GET" {
timeout = 30 * time.Second
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()

var bodyReader io.Reader
Expand Down
Loading