-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathengine.go
More file actions
399 lines (362 loc) · 11.1 KB
/
engine.go
File metadata and controls
399 lines (362 loc) · 11.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
package main
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
)
// ProgressFn is called as bytes stream in. total may be -1 if unknown.
type ProgressFn func(written, total int64)

// countingWriter tallies every byte written through it and invokes an
// optional progress callback after each write. It never returns an
// error, so it is safe to pair with the real destination in an
// io.MultiWriter.
type countingWriter struct {
	written int64      // running byte count
	total   int64      // expected total, or -1 when unknown
	onWrite ProgressFn // optional; nil disables reporting
}

// Write records len(p) bytes and fires the callback. It always succeeds.
func (cw *countingWriter) Write(p []byte) (int, error) {
	size := len(p)
	cw.written += int64(size)
	if fn := cw.onWrite; fn != nil {
		fn(cw.written, cw.total)
	}
	return size, nil
}
// downloadFile streams url into dest, reporting progress through
// onProgress (total is resp.ContentLength, which may be -1 when the
// server sends no length). If the transfer or the final close fails,
// the partially-written file is removed so callers never find a
// truncated download on disk.
//
// NOTE(review): http.Get uses the default client with no timeout. A
// whole-request timeout would abort legitimately long multi-GB model
// downloads, so none is added here; only a connection-level hang is a
// residual risk.
func downloadFile(dest, url string, onProgress ProgressFn) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("bad status: %s", resp.Status)
	}
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	cw := &countingWriter{total: resp.ContentLength, onWrite: onProgress}
	if _, err := io.Copy(io.MultiWriter(out, cw), resp.Body); err != nil {
		out.Close()
		os.Remove(dest) // drop the partial file
		return err
	}
	// A failed close can mean buffered data never reached the disk
	// (e.g. ENOSPC); a deferred close would silently swallow it.
	if err := out.Close(); err != nil {
		os.Remove(dest)
		return err
	}
	return nil
}
// githubAsset is the subset of GitHub's release-asset JSON we care about:
// the asset filename and its direct download URL.
type githubAsset struct {
	Name               string `json:"name"`
	BrowserDownloadURL string `json:"browser_download_url"`
}

// githubRelease is the subset of GitHub's release JSON we care about:
// the release tag plus its downloadable assets.
type githubRelease struct {
	TagName string        `json:"tag_name"`
	Assets  []githubAsset `json:"assets"`
}
// latestLlamacppAsset resolves the llama.cpp release asset URL for this
// OS/arch by querying GitHub for the latest release. It returns the
// asset download URL and the release tag name.
func latestLlamacppAsset() (string, string, error) {
	platform := runtime.GOOS + "/" + runtime.GOARCH
	suffix, ok := llamacppAssetSuffix[platform]
	if !ok {
		return "", "", fmt.Errorf("no llama.cpp prebuilt available for %s", platform)
	}
	resp, err := http.Get(llamacppLatestURL)
	if err != nil {
		return "", "", fmt.Errorf("fetch latest release: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", "", fmt.Errorf("GitHub API returned %s", resp.Status)
	}
	var release githubRelease
	if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
		return "", "", fmt.Errorf("decode release JSON: %w", err)
	}
	// Pick the first asset whose name matches this platform's suffix.
	for _, asset := range release.Assets {
		if strings.HasSuffix(asset.Name, suffix) {
			return asset.BrowserDownloadURL, release.TagName, nil
		}
	}
	return "", "", fmt.Errorf("no asset ending in %q in release %s", suffix, release.TagName)
}
// downloadEngine fetches the latest llama.cpp prebuilt archive for the
// current platform, unpacks it into the engine dir, and removes any
// legacy llamafile binary left over from older atlas.llm versions.
// It is a no-op when the engine is already present.
func downloadEngine(onProgress ProgressFn) error {
	if isEngineDownloaded() {
		return nil
	}
	assetURL, _, err := latestLlamacppAsset()
	if err != nil {
		return err
	}
	dir, err := engineDir()
	if err != nil {
		return err
	}
	// Choose a local archive name. ".tar.gz" needs a special case
	// because filepath.Ext only sees the final ".gz".
	name := "llamacpp" + filepath.Ext(assetURL)
	if strings.HasSuffix(assetURL, ".tar.gz") {
		name = "llamacpp.tar.gz"
	}
	archive := filepath.Join(dir, name)
	if err := downloadFile(archive, assetURL, onProgress); err != nil {
		return err
	}
	defer os.Remove(archive)
	switch {
	case strings.HasSuffix(name, ".zip"):
		if err := extractZip(archive, dir); err != nil {
			return fmt.Errorf("extract zip: %w", err)
		}
	default:
		if err := extractTarGz(archive, dir); err != nil {
			return fmt.Errorf("extract tar.gz: %w", err)
		}
	}
	// Archives don't always carry the exec bit; restore it (best effort).
	if runtime.GOOS != "windows" {
		if bin, err := findEngineBinary(); err == nil {
			_ = os.Chmod(bin, 0755)
		}
	}
	// Best-effort cleanup of the old llamafile binary from pre-0.4 installs.
	if base, err := atlasDir(); err == nil {
		for _, legacy := range []string{"llamafile", "llamafile.exe"} {
			_ = os.Remove(filepath.Join(base, legacy))
		}
	}
	return nil
}
// extractZip unpacks the zip archive at src into destDir, rejecting any
// entry whose joined path would escape destDir (zip-slip protection).
func extractZip(src, destDir string) error {
	reader, err := zip.OpenReader(src)
	if err != nil {
		return err
	}
	defer reader.Close()
	prefix := filepath.Clean(destDir) + string(os.PathSeparator)
	for _, entry := range reader.File {
		dest := filepath.Join(destDir, entry.Name)
		// filepath.Join cleans the path; anything escaping destDir is hostile.
		if !strings.HasPrefix(dest, prefix) {
			return fmt.Errorf("zip slip: %s", entry.Name)
		}
		if entry.FileInfo().IsDir() {
			if err := os.MkdirAll(dest, 0755); err != nil {
				return err
			}
			continue
		}
		if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil {
			return err
		}
		if err := writeZipEntry(entry, dest); err != nil {
			return err
		}
	}
	return nil
}
// writeZipEntry copies a single zip entry to target, preserving the
// entry's file mode. An existing file at target is truncated.
func writeZipEntry(f *zip.File, target string) error {
	rc, err := f.Open()
	if err != nil {
		return err
	}
	defer rc.Close()
	out, err := os.OpenFile(target, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
	if err != nil {
		return err
	}
	if _, err := io.Copy(out, rc); err != nil {
		out.Close()
		return err
	}
	// Check the close error explicitly: a failed close can mean the data
	// never reached disk (e.g. ENOSPC), which a deferred close would
	// silently swallow.
	return out.Close()
}
// extractTarGz unpacks the gzip-compressed tarball at src into destDir,
// rejecting any entry whose joined path would escape destDir (tar-slip
// protection). Entry types other than directories and regular files
// (symlinks, devices, ...) are skipped.
func extractTarGz(src, destDir string) error {
	archive, err := os.Open(src)
	if err != nil {
		return err
	}
	defer archive.Close()
	gz, err := gzip.NewReader(archive)
	if err != nil {
		return err
	}
	defer gz.Close()
	prefix := filepath.Clean(destDir) + string(os.PathSeparator)
	reader := tar.NewReader(gz)
	for {
		hdr, err := reader.Next()
		if err != nil {
			if err == io.EOF {
				return nil // archive fully consumed
			}
			return err
		}
		dest := filepath.Join(destDir, hdr.Name)
		if !strings.HasPrefix(dest, prefix) {
			return fmt.Errorf("tar slip: %s", hdr.Name)
		}
		switch hdr.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(dest, 0755); err != nil {
				return err
			}
		case tar.TypeReg:
			if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil {
				return err
			}
			if err := writeTarEntry(reader, dest, os.FileMode(hdr.Mode)); err != nil {
				return err
			}
		}
	}
}
// writeTarEntry copies the current entry from tr into target with the
// given file mode. An existing file at target is truncated.
func writeTarEntry(tr *tar.Reader, target string, mode os.FileMode) error {
	out, err := os.OpenFile(target, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
	if err != nil {
		return err
	}
	if _, err := io.Copy(out, tr); err != nil {
		out.Close()
		return err
	}
	// Check the close error explicitly: a failed close can mean the data
	// never reached disk (e.g. ENOSPC), which a deferred close would
	// silently swallow.
	return out.Close()
}
// downloadModel fetches a model into models/. No-op if already present.
func downloadModel(m Model, onProgress ProgressFn) error {
	dest, err := modelPath(m)
	if err != nil {
		return err
	}
	if isModelDownloaded(m) {
		// Already on disk; nothing to do.
		return nil
	}
	return downloadFile(dest, m.URL, onProgress)
}
// requireEngine returns the path to llama-cli[.exe], or an error asking
// the user to /download. It does NOT download automatically.
func requireEngine() (string, error) {
	if isEngineDownloaded() {
		return findEngineBinary()
	}
	return "", fmt.Errorf("inference engine is not downloaded — run /download engine (or /download) in chat")
}
// requireModel returns the on-disk path for m, or an error asking the
// user to /download. It does NOT download automatically.
func requireModel(m Model) (string, error) {
	path, err := modelPath(m)
	if err != nil {
		return "", err
	}
	if isModelDownloaded(m) {
		return path, nil
	}
	return "", fmt.Errorf("model %q is not downloaded — run /download %s in chat", m.Name, m.Name)
}
// runChat drives a /v1/chat/completions call against the persistent
// llama-server instance. The server is lazy-started on the first call
// per process (or whenever the active model changes), so the GGUF mmap +
// warmup cost is paid once per session rather than once per turn.
//
// Chat completions (instead of raw /completion) lets llama-server apply
// the model's native chat template — Gemma 3's
// <start_of_turn>/<end_of_turn> sentinels, ChatML, etc. — and stop at
// the turn boundary. Raw completion with "User:/Assistant:" markers made
// the model hallucinate additional fake turns after its real answer.
func runChat(msgs []ChatMsg, maxTokens int) (string, error) {
	if _, err := requireEngine(); err != nil {
		return "", err
	}
	// Only gate on the model when the active one can be resolved.
	if model, err := currentModel(); err == nil {
		if _, err := requireModel(model); err != nil {
			return "", err
		}
	}
	srv, err := ensureServer()
	if err != nil {
		return "", fmt.Errorf("server: %w", err)
	}
	reply, err := srv.ChatComplete(msgs, maxTokens)
	if err != nil {
		return "", fmt.Errorf("inference failed: %w", err)
	}
	return strings.TrimSpace(reply), nil
}
// runSingleUser is a convenience wrapper for one-shot tasks (summarize,
// grep) that carry no conversational history — an optional system prompt
// plus a single user prompt.
func runSingleUser(system, user string, maxTokens int) (string, error) {
	var msgs []ChatMsg
	if system != "" {
		msgs = append(msgs, ChatMsg{Role: "system", Content: system})
	}
	msgs = append(msgs, ChatMsg{Role: "user", Content: user})
	return runChat(msgs, maxTokens)
}
// summarizeMaxChars caps the amount of file content we send to the model
// per /summarize request. Roughly 10K chars ≈ 2.5K tokens, well inside the
// server's 16K ctx even after the system prompt, chat-template overhead,
// and the 512-token reply budget.
const summarizeMaxChars = 10000

// summarizeContent asks the model for a 1-3 sentence summary of content,
// truncating oversized input to summarizeMaxChars bytes first.
func summarizeContent(content string) (string, error) {
	prompt := content
	if len(prompt) > summarizeMaxChars {
		// Byte-slicing can cut a multi-byte UTF-8 rune in half;
		// ToValidUTF8 drops the trailing partial rune so the model
		// never receives mangled bytes. (It also scrubs any invalid
		// bytes already present — acceptable for a text summarizer.)
		prompt = strings.ToValidUTF8(prompt[:summarizeMaxChars], "") +
			"\n\n... (file truncated for summary)"
	}
	return runSingleUser(
		"You are a concise code summarizer. Respond with only 1-3 plain sentences describing the file's purpose. Do not use markdown, code blocks, or lists.",
		"Summarize this file:\n\n"+prompt,
		512,
	)
}
// ChatMessage is one turn of the stored conversation history, as kept by
// the chat UI layer (distinct from the wire-level ChatMsg).
type ChatMessage struct {
	Role    string // "user" or "assistant"
	Content string // plain message text for this turn
}
// runAgentStep advances a tool-enabled conversation by one round-trip:
// it POSTs the current message list (plus tool definitions) and returns
// the assistant content along with any requested tool calls. Callers
// loop until the returned tool-call list is empty.
func runAgentStep(msgs []ChatMsg, maxTokens int) (string, []ToolCall, error) {
	if _, err := requireEngine(); err != nil {
		return "", nil, err
	}
	// Only gate on the model when the active one can be resolved.
	if model, err := currentModel(); err == nil {
		if _, err := requireModel(model); err != nil {
			return "", nil, err
		}
	}
	srv, err := ensureServer()
	if err != nil {
		return "", nil, fmt.Errorf("server: %w", err)
	}
	reply, toolCalls, err := srv.ChatCompleteWithTools(msgs, toolDefsJSON(), maxTokens)
	if err != nil {
		return "", nil, fmt.Errorf("inference failed: %w", err)
	}
	return strings.TrimSpace(reply), toolCalls, nil
}
// agentSystemPrompt is prepended to the conversation when tools are
// enabled. It tells the model it has filesystem + shell capabilities,
// and that destructive actions require user approval (so it doesn't
// loop on unexpected denials). NOTE: this string is model-facing
// behavior — edit its wording with care.
const agentSystemPrompt = `You are atlas, a concise coding assistant with access to the user's local project via tools. Use tools when you need to inspect or change files or run commands — don't guess about file contents. Destructive tools (write_file, edit_file, run_cmd) require the user to approve each call; if a call is denied, acknowledge and continue without retrying. After gathering what you need, answer plainly in markdown.`
// chat runs one conversational turn: it rebuilds the wire-level message
// list from the stored history, appends the new user input, and queries
// the model.
func chat(history []ChatMessage, userInput string) (string, error) {
	msgs := make([]ChatMsg, 0, len(history)+2)
	msgs = append(msgs, ChatMsg{Role: "system", Content: "You are a concise, helpful coding assistant. Keep replies under three short paragraphs unless more detail is explicitly requested."})
	for _, turn := range history {
		msgs = append(msgs, ChatMsg{Role: turn.Role, Content: turn.Content})
	}
	msgs = append(msgs, ChatMsg{Role: "user", Content: userInput})
	// Config load errors are deliberately ignored here — presumably the
	// zero-value config yields usable defaults; verify loadConfig contract.
	cfg, _ := loadConfig()
	return runChat(msgs, cfg.MaxTokens)
}
// formatBytes renders a byte count in human-readable IEC form:
// "512 B", "1.5 KB", "11.1 MB", ... (1024-based, one decimal place).
func formatBytes(n int64) string {
	const unit = 1024
	if n < unit {
		return fmt.Sprintf("%d B", n)
	}
	const prefixes = "KMGTPE"
	value := float64(n)
	idx := 0
	// Divide down until one more step would drop below 1.0 of the next
	// prefix; powers of two divide exactly in float64, so repeated
	// division matches a single division by the combined divisor.
	for value >= unit*unit && idx < len(prefixes)-1 {
		value /= unit
		idx++
	}
	return fmt.Sprintf("%.1f %cB", value/unit, prefixes[idx])
}