Skip to content

Commit c071cc0

Browse files
systemshift and sawka authored
Perplexity api (#1432)
I have added Perplexity to the default AI models. I see Anthropic models are becoming part of the default as well, so I thought I should add a model that is specific for web search. This pull request is a work in progress; reviews and edit recommendations are welcome. --------- Co-authored-by: sawka <mike@commandline.dev>
1 parent b706d45 commit c071cc0

File tree

4 files changed

+244
-24
lines changed

4 files changed

+244
-24
lines changed

docs/docs/faq.mdx

Lines changed: 17 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -66,6 +66,23 @@ Set these keys:
6666

6767
Note: we set the `ai:*` key to true to clear all the existing "ai" keys so this config is a clean slate.
6868

69+
### How can I connect to Perplexity?
70+
71+
Open your [config file](./config) in Wave using `wsh editconfig`.
72+
73+
Set these keys:
74+
75+
```json
76+
{
77+
"ai:*": true,
78+
"ai:apitype": "perplexity",
79+
"ai:model": "llama-3.1-sonar-small-128k-online",
80+
"ai:apitoken": "<your perplexity API key>"
81+
}
82+
```
83+
84+
Note: we set the `ai:*` key to true to clear all the existing "ai" keys so this config is a clean slate.
85+
6986
To switch between models, consider [adding AI Presets](./presets) instead.
7087

7188
### How can I see the block numbers?

frontend/app/view/waveai/waveai.tsx

Lines changed: 38 additions & 24 deletions
Original file line number · Diff line number · Diff line change
@@ -180,40 +180,54 @@ export class WaveAiModel implements ViewModel {
180180
const presetKey = get(this.presetKey);
181181
const presetName = presets[presetKey]?.["display:name"] ?? "";
182182
const isCloud = isBlank(aiOpts.apitoken) && isBlank(aiOpts.baseurl);
183-
if (aiOpts?.apitype == "anthropic") {
184-
const modelName = aiOpts.model;
185-
viewTextChildren.push({
186-
elemtype: "iconbutton",
187-
icon: "globe",
188-
title: "Using Remote Antropic API (" + modelName + ")",
189-
noAction: true,
190-
});
191-
} else if (isCloud) {
192-
viewTextChildren.push({
193-
elemtype: "iconbutton",
194-
icon: "cloud",
195-
title: "Using Wave's AI Proxy (gpt-4o-mini)",
196-
noAction: true,
197-
});
198-
} else {
199-
const baseUrl = aiOpts.baseurl ?? "OpenAI Default Endpoint";
200-
const modelName = aiOpts.model;
201-
if (baseUrl.startsWith("http://localhost") || baseUrl.startsWith("http://127.0.0.1")) {
183+
184+
// Handle known API providers
185+
switch (aiOpts?.apitype) {
186+
case "anthropic":
202187
viewTextChildren.push({
203188
elemtype: "iconbutton",
204-
icon: "location-dot",
205-
title: "Using Local Model @ " + baseUrl + " (" + modelName + ")",
189+
icon: "globe",
190+
title: `Using Remote Anthropic API (${aiOpts.model})`,
206191
noAction: true,
207192
});
208-
} else {
193+
break;
194+
case "perplexity":
209195
viewTextChildren.push({
210196
elemtype: "iconbutton",
211197
icon: "globe",
212-
title: "Using Remote Model @ " + baseUrl + " (" + modelName + ")",
198+
title: `Using Remote Perplexity API (${aiOpts.model})`,
213199
noAction: true,
214200
});
215-
}
201+
break;
202+
default:
203+
if (isCloud) {
204+
viewTextChildren.push({
205+
elemtype: "iconbutton",
206+
icon: "cloud",
207+
title: "Using Wave's AI Proxy (gpt-4o-mini)",
208+
noAction: true,
209+
});
210+
} else {
211+
const baseUrl = aiOpts.baseurl ?? "OpenAI Default Endpoint";
212+
const modelName = aiOpts.model;
213+
if (baseUrl.startsWith("http://localhost") || baseUrl.startsWith("http://127.0.0.1")) {
214+
viewTextChildren.push({
215+
elemtype: "iconbutton",
216+
icon: "location-dot",
217+
title: `Using Local Model @ ${baseUrl} (${modelName})`,
218+
noAction: true,
219+
});
220+
} else {
221+
viewTextChildren.push({
222+
elemtype: "iconbutton",
223+
icon: "globe",
224+
title: `Using Remote Model @ ${baseUrl} (${modelName})`,
225+
noAction: true,
226+
});
227+
}
228+
}
216229
}
230+
217231
const dropdownItems = Object.entries(presets)
218232
.sort((a, b) => ((a[1]["display:order"] ?? 0) > (b[1]["display:order"] ?? 0) ? 1 : -1))
219233
.map(

pkg/waveai/perplexitybackend.go

Lines changed: 179 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -0,0 +1,179 @@
1+
// Copyright 2024, Command Line Inc.
2+
// SPDX-License-Identifier: Apache-2.0
3+
4+
package waveai
5+
6+
import (
7+
"bufio"
8+
"context"
9+
"encoding/json"
10+
"errors"
11+
"fmt"
12+
"io"
13+
"net/http"
14+
"strings"
15+
16+
"github.com/wavetermdev/waveterm/pkg/panichandler"
17+
"github.com/wavetermdev/waveterm/pkg/wshrpc"
18+
)
19+
20+
type PerplexityBackend struct{}
21+
22+
var _ AIBackend = PerplexityBackend{}
23+
24+
// Perplexity API request types
25+
type perplexityMessage struct {
26+
Role string `json:"role"`
27+
Content string `json:"content"`
28+
}
29+
30+
type perplexityRequest struct {
31+
Model string `json:"model"`
32+
Messages []perplexityMessage `json:"messages"`
33+
Stream bool `json:"stream"`
34+
}
35+
36+
// Perplexity API response types
37+
type perplexityResponseDelta struct {
38+
Content string `json:"content"`
39+
}
40+
41+
type perplexityResponseChoice struct {
42+
Delta perplexityResponseDelta `json:"delta"`
43+
FinishReason string `json:"finish_reason"`
44+
}
45+
46+
type perplexityResponse struct {
47+
ID string `json:"id"`
48+
Choices []perplexityResponseChoice `json:"choices"`
49+
Model string `json:"model"`
50+
}
51+
52+
func (PerplexityBackend) StreamCompletion(ctx context.Context, request wshrpc.OpenAiStreamRequest) chan wshrpc.RespOrErrorUnion[wshrpc.OpenAIPacketType] {
53+
rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.OpenAIPacketType])
54+
55+
go func() {
56+
defer func() {
57+
panicErr := panichandler.PanicHandler("PerplexityBackend.StreamCompletion")
58+
if panicErr != nil {
59+
rtn <- makeAIError(panicErr)
60+
}
61+
close(rtn)
62+
}()
63+
64+
if request.Opts == nil {
65+
rtn <- makeAIError(errors.New("no perplexity opts found"))
66+
return
67+
}
68+
69+
model := request.Opts.Model
70+
if model == "" {
71+
model = "llama-3.1-sonar-small-128k-online"
72+
}
73+
74+
// Convert messages format
75+
var messages []perplexityMessage
76+
for _, msg := range request.Prompt {
77+
role := "user"
78+
if msg.Role == "assistant" {
79+
role = "assistant"
80+
} else if msg.Role == "system" {
81+
role = "system"
82+
}
83+
84+
messages = append(messages, perplexityMessage{
85+
Role: role,
86+
Content: msg.Content,
87+
})
88+
}
89+
90+
perplexityReq := perplexityRequest{
91+
Model: model,
92+
Messages: messages,
93+
Stream: true,
94+
}
95+
96+
reqBody, err := json.Marshal(perplexityReq)
97+
if err != nil {
98+
rtn <- makeAIError(fmt.Errorf("failed to marshal perplexity request: %v", err))
99+
return
100+
}
101+
102+
req, err := http.NewRequestWithContext(ctx, "POST", "https://api.perplexity.ai/chat/completions", strings.NewReader(string(reqBody)))
103+
if err != nil {
104+
rtn <- makeAIError(fmt.Errorf("failed to create perplexity request: %v", err))
105+
return
106+
}
107+
108+
req.Header.Set("Content-Type", "application/json")
109+
req.Header.Set("Authorization", "Bearer "+request.Opts.APIToken)
110+
111+
client := &http.Client{}
112+
resp, err := client.Do(req)
113+
if err != nil {
114+
rtn <- makeAIError(fmt.Errorf("failed to send perplexity request: %v", err))
115+
return
116+
}
117+
defer resp.Body.Close()
118+
119+
if resp.StatusCode != http.StatusOK {
120+
bodyBytes, _ := io.ReadAll(resp.Body)
121+
rtn <- makeAIError(fmt.Errorf("Perplexity API error: %s - %s", resp.Status, string(bodyBytes)))
122+
return
123+
}
124+
125+
reader := bufio.NewReader(resp.Body)
126+
sentHeader := false
127+
128+
for {
129+
// Check for context cancellation
130+
select {
131+
case <-ctx.Done():
132+
rtn <- makeAIError(fmt.Errorf("request cancelled: %v", ctx.Err()))
133+
return
134+
default:
135+
}
136+
137+
line, err := reader.ReadString('\n')
138+
if err == io.EOF {
139+
break
140+
}
141+
if err != nil {
142+
rtn <- makeAIError(fmt.Errorf("error reading stream: %v", err))
143+
break
144+
}
145+
146+
line = strings.TrimSpace(line)
147+
if !strings.HasPrefix(line, "data: ") {
148+
continue
149+
}
150+
151+
data := strings.TrimPrefix(line, "data: ")
152+
if data == "[DONE]" {
153+
break
154+
}
155+
156+
var response perplexityResponse
157+
if err := json.Unmarshal([]byte(data), &response); err != nil {
158+
rtn <- makeAIError(fmt.Errorf("error unmarshaling response: %v", err))
159+
break
160+
}
161+
162+
if !sentHeader {
163+
pk := MakeOpenAIPacket()
164+
pk.Model = response.Model
165+
rtn <- wshrpc.RespOrErrorUnion[wshrpc.OpenAIPacketType]{Response: *pk}
166+
sentHeader = true
167+
}
168+
169+
for _, choice := range response.Choices {
170+
pk := MakeOpenAIPacket()
171+
pk.Text = choice.Delta.Content
172+
pk.FinishReason = choice.FinishReason
173+
rtn <- wshrpc.RespOrErrorUnion[wshrpc.OpenAIPacketType]{Response: *pk}
174+
}
175+
}
176+
}()
177+
178+
return rtn
179+
}

pkg/waveai/waveai.go

Lines changed: 10 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -17,6 +17,7 @@ const OpenAICloudReqStr = "openai-cloudreq"
1717
const PacketEOFStr = "EOF"
1818
const DefaultAzureAPIVersion = "2023-05-15"
1919
const ApiType_Anthropic = "anthropic"
20+
const ApiType_Perplexity = "perplexity"
2021

2122
type OpenAICmdInfoPacketOutputType struct {
2223
Model string `json:"model,omitempty"`
@@ -74,6 +75,15 @@ func RunAICommand(ctx context.Context, request wshrpc.OpenAiStreamRequest) chan
7475
anthropicBackend := AnthropicBackend{}
7576
return anthropicBackend.StreamCompletion(ctx, request)
7677
}
78+
if request.Opts.APIType == ApiType_Perplexity {
79+
endpoint := request.Opts.BaseURL
80+
if endpoint == "" {
81+
endpoint = "default"
82+
}
83+
log.Printf("sending ai chat message to perplexity endpoint %q using model %s\n", endpoint, request.Opts.Model)
84+
perplexityBackend := PerplexityBackend{}
85+
return perplexityBackend.StreamCompletion(ctx, request)
86+
}
7787
if IsCloudAIRequest(request.Opts) {
7888
log.Print("sending ai chat message to default waveterm cloud endpoint\n")
7989
cloudBackend := WaveAICloudBackend{}

0 commit comments

Comments (0)