Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion .github/workflows/lint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ on:
- LILYPAD-205
- LILYPAD-207
- LILYPAD-208
- add-ts-anthropic
- LILYPAD-209

pull_request:
Expand All @@ -18,7 +19,8 @@ on:
- LILYPAD-205
- LILYPAD-207
- LILYPAD-208
- LILYPAD-209
- add-ts-anthropic
- LILYPAD-209

jobs:
codespell:
Expand Down
2 changes: 2 additions & 0 deletions .github/workflows/sdk_generation.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ on:
- LILYPAD-205
- LILYPAD-207
- LILYPAD-208
- add-ts-anthropic
- LILYPAD-209

pull_request:
Expand All @@ -18,6 +19,7 @@ on:
- LILYPAD-205
- LILYPAD-207
- LILYPAD-208
- add-ts-anthropic
- LILYPAD-209

jobs:
Expand Down
2 changes: 2 additions & 0 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ on:
- LILYPAD-205
- LILYPAD-207
- LILYPAD-208
- add-ts-anthropic
- LILYPAD-209
pull_request:
branches:
Expand All @@ -17,6 +18,7 @@ on:
- LILYPAD-205
- LILYPAD-207
- LILYPAD-208
- add-ts-anthropic
- LILYPAD-209

jobs:
Expand Down
7 changes: 3 additions & 4 deletions sdks/typescript/src/instrumentors/openai-hook.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -559,10 +559,9 @@ describe('setupOpenAIHooks', () => {
'gen_ai.system': 'openai',
content: 'Hello',
});
expect(mockSpan.addEvent).toHaveBeenCalledWith('gen_ai.assistant.message', {
'gen_ai.system': 'openai',
content: '{"type":"text","text":"Hi there"}',
});
// Assistant messages in the request are not recorded as events
// Only assistant messages from the response are recorded
expect(mockSpan.addEvent).toHaveBeenCalledTimes(2);
});

it('should handle successful response', async () => {
Expand Down
30 changes: 18 additions & 12 deletions sdks/typescript/src/instrumentors/openai-hook.ts
Original file line number Diff line number Diff line change
Expand Up @@ -201,19 +201,25 @@ function wrapChatCompletionsCreate(
// Record messages - match Python SDK format
if (params?.messages) {
params.messages.forEach((message) => {
const eventName = `gen_ai.${message.role}.message`;
const attributes: Attributes = {
[SEMATTRS_GEN_AI_SYSTEM]: 'openai',
};

if (message.content) {
attributes['content'] =
typeof message.content === 'string'
? message.content
: safeStringify(message.content);
if (message.role === 'system') {
span.addEvent('gen_ai.system.message', {
[SEMATTRS_GEN_AI_SYSTEM]: 'openai',
content:
typeof message.content === 'string'
? message.content
: safeStringify(message.content),
});
} else if (message.role === 'user') {
span.addEvent('gen_ai.user.message', {
[SEMATTRS_GEN_AI_SYSTEM]: 'openai',
content:
typeof message.content === 'string'
? message.content
: safeStringify(message.content),
});
}

span.addEvent(eventName, attributes);
// Note: assistant messages in the request are not recorded as events
// Only the response assistant message is recorded as gen_ai.choice
});
}

Expand Down
52 changes: 38 additions & 14 deletions sdks/typescript/src/instrumentors/openai-otel-instrumentation.ts
Original file line number Diff line number Diff line change
Expand Up @@ -376,13 +376,25 @@ export class OpenAIInstrumentation extends InstrumentationBase {
// Record messages
if (params?.messages) {
params.messages.forEach((message: any) => {
span.addEvent(`gen_ai.${message.role}.message`, {
'gen_ai.system': 'openai',
content:
typeof message.content === 'string'
? message.content
: JSON.stringify(message.content),
});
if (message.role === 'system') {
span.addEvent('gen_ai.system.message', {
'gen_ai.system': 'openai',
content:
typeof message.content === 'string'
? message.content
: JSON.stringify(message.content),
});
} else if (message.role === 'user') {
span.addEvent('gen_ai.user.message', {
'gen_ai.system': 'openai',
content:
typeof message.content === 'string'
? message.content
: JSON.stringify(message.content),
});
}
// Note: assistant messages in the request are not recorded as events
// Only the response assistant message is recorded as gen_ai.choice
});
}

Expand Down Expand Up @@ -629,13 +641,25 @@ export class OpenAIInstrumentation extends InstrumentationBase {
// Record messages
if (params?.messages) {
params.messages.forEach((message: any) => {
span.addEvent(`gen_ai.${message.role}.message`, {
'gen_ai.system': 'openai',
content:
typeof message.content === 'string'
? message.content
: JSON.stringify(message.content),
});
if (message.role === 'system') {
span.addEvent('gen_ai.system.message', {
'gen_ai.system': 'openai',
content:
typeof message.content === 'string'
? message.content
: JSON.stringify(message.content),
});
} else if (message.role === 'user') {
span.addEvent('gen_ai.user.message', {
'gen_ai.system': 'openai',
content:
typeof message.content === 'string'
? message.content
: JSON.stringify(message.content),
});
}
// Note: assistant messages in the request are not recorded as events
// Only the response assistant message is recorded as gen_ai.choice
});
}

Expand Down
2 changes: 1 addition & 1 deletion sdks/typescript/src/test-utils/setup.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import { vi } from 'vitest';
import { vi, beforeEach } from 'vitest';

// Mock console methods to avoid noise in tests
global.console = {
Expand Down
117 changes: 117 additions & 0 deletions sdks/typescript/src/types/gemini.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
/**
* Basic type definitions for Google Gemini integration
* These types provide minimal typing for the Google AI SDK integration
*/

/**
 * A single fragment of a Gemini content turn: either inline text or
 * base64-encoded binary data.
 *
 * NOTE(review): field names here are snake_case (`inline_data`,
 * `mime_type`), which matches the Gemini REST wire format; the official
 * `@google/generative-ai` JS SDK uses camelCase (`inlineData`,
 * `mimeType`) — confirm which transport these types are meant to
 * describe before extending them.
 */
export interface GeminiPart {
  /** Plain-text fragment. */
  text?: string;
  /** Inline binary payload (e.g. an image), base64-encoded. */
  inline_data?: {
    mime_type: string;
    data: string;
  };
}

/**
 * One conversational turn sent to or returned by Gemini.
 */
export interface GeminiContent {
  // NOTE(review): 'system' appears here, but the Gemini API's contents
  // list normally only carries 'user'/'model' roles (system prompts go
  // through a separate field) — verify this role set against the API.
  role: 'user' | 'model' | 'system';
  /** Ordered fragments making up the turn. */
  parts: GeminiPart[];
}

/**
 * Request payload for `generateContent` / `generateContentStream`.
 *
 * NOTE(review): field names are snake_case — presumably mirroring the
 * Gemini REST wire format rather than the camelCase JS SDK surface;
 * verify against the client actually being instrumented.
 */
export interface GeminiGenerateContentParams {
  /** Model identifier; optional when the model is bound on the instance. */
  model?: string;
  /** Conversation history plus the new prompt, in order. */
  contents: GeminiContent[];
  /** Per-category content-safety thresholds. */
  safety_settings?: Array<{
    category: string;
    threshold: string;
  }>;
  /** Sampling and output-length controls. */
  generation_config?: {
    temperature?: number;
    top_p?: number;
    top_k?: number;
    candidate_count?: number;
    max_output_tokens?: number;
    stop_sequences?: string[];
  };
}

/**
 * Non-streaming response from `generateContent`.
 */
export interface GeminiGenerateContentResponse {
  /** One entry per generated candidate completion. */
  candidates: Array<{
    /** The generated turn; `role` is a plain string here, not the literal union. */
    content: {
      parts: Array<{
        text: string;
      }>;
      role: string;
    };
    /** Why generation stopped for this candidate. */
    finish_reason: 'STOP' | 'MAX_TOKENS' | 'SAFETY' | 'RECITATION' | 'OTHER';
    /** Position of this candidate in the candidates list. */
    index: number;
    safety_ratings?: Array<{
      category: string;
      probability: string;
    }>;
  }>;
  /** Feedback about the prompt itself (e.g. why it was blocked). */
  prompt_feedback?: {
    block_reason?: string;
    safety_ratings?: Array<{
      category: string;
      probability: string;
    }>;
  };
  /** Token accounting for billing/telemetry. */
  usage_metadata?: {
    prompt_token_count: number;
    candidates_token_count: number;
    total_token_count: number;
  };
}

/**
 * One chunk of a `generateContentStream` response.
 *
 * Differs from {@link GeminiGenerateContentResponse}: `candidates`,
 * the candidate `role`, and `finish_reason` are all optional, since a
 * mid-stream chunk may carry only partial text (the finish reason
 * arrives on the final chunk).
 */
export interface GeminiGenerateContentStreamChunk {
  candidates?: Array<{
    content: {
      parts: Array<{
        text: string;
      }>;
      role?: string;
    };
    /** Present only on the terminal chunk of a candidate. */
    finish_reason?: 'STOP' | 'MAX_TOKENS' | 'SAFETY' | 'RECITATION' | 'OTHER';
    index: number;
    safety_ratings?: Array<{
      category: string;
      probability: string;
    }>;
  }>;
  /** Token accounting; typically present on the final chunk. */
  usage_metadata?: {
    prompt_token_count: number;
    candidates_token_count: number;
    total_token_count: number;
  };
}

// Type for generateContent and generateContentStream functions

/**
 * Signature of `GenerativeModel.generateContent`. Accepts either a full
 * request object or a bare prompt string; `options` is deliberately
 * opaque (`unknown`) since this module only wraps the call.
 */
export type GenerateContentFunction = (
  params: GeminiGenerateContentParams | string,
  options?: unknown,
) => Promise<GeminiGenerateContentResponse>;

/**
 * Signature of `GenerativeModel.generateContentStream`: resolves to an
 * async iterable of partial-response chunks.
 */
export type GenerateContentStreamFunction = (
  params: GeminiGenerateContentParams | string,
  options?: unknown,
) => Promise<AsyncIterable<GeminiGenerateContentStreamChunk>>;

// Interface for GenerativeModel
/**
 * Minimal structural view of a Gemini `GenerativeModel` instance —
 * every member is optional so the instrumentor can probe objects it
 * does not construct itself.
 */
export interface GenerativeModel {
  generateContent?: GenerateContentFunction;
  generateContentStream?: GenerateContentStreamFunction;
  /** Model identifier bound to this instance, when exposed. */
  model?: string;
}

// Interface for Gemini-like instances
/**
 * Structural view of a client object that can hand out models
 * (e.g. a `GoogleGenerativeAI` instance). Optional so arbitrary
 * objects can be duck-type-checked against it.
 */
export interface GeminiLike {
  getGenerativeModel?: (config: { model: string }) => GenerativeModel;
}

/**
 * Shape of the imported Gemini package: either the class itself or a
 * module namespace exposing it as `default` and/or
 * `GoogleGenerativeAI` (covers both CJS and ESM interop forms).
 */
export type GeminiModule =
  | GeminiClass
  | {
      default?: GeminiClass;
      GoogleGenerativeAI?: GeminiClass;
    };

/**
 * A constructor-like value whose prototype carries the client surface.
 *
 * NOTE(review): there is no construct signature here, so values of this
 * type cannot be `new`-ed through the type system — presumably the
 * instrumentor only patches `prototype`. Adding `new (...)` now would
 * narrow the type and break existing assignments; confirm intent
 * before changing.
 */
export interface GeminiClass {
  prototype: GeminiLike;
}
Loading