Commit b5358b9

Add initial system prompt in ChatHandler and completion (#28)
* Add initial system prompt in ChatHandler and completion
* Use the default prompt in anthropic provider
* lint
* Add a single option to the completion prompt
1 parent 975cd5b commit b5358b9

5 files changed: +84 -6 lines changed

src/chat-handler.ts

+20 -3
@@ -13,10 +13,12 @@ import type { BaseChatModel } from '@langchain/core/language_models/chat_models'
 import {
   AIMessage,
   HumanMessage,
-  mergeMessageRuns
+  mergeMessageRuns,
+  SystemMessage
 } from '@langchain/core/messages';
 import { UUID } from '@lumino/coreutils';
 import { getErrorMessage } from './llm-models';
+import { chatSystemPrompt } from './provider';
 import { IAIProvider } from './token';
 
 export type ConnectionMessage = {
@@ -28,15 +30,28 @@ export class ChatHandler extends ChatModel {
   constructor(options: ChatHandler.IOptions) {
     super(options);
     this._aiProvider = options.aiProvider;
+    this._prompt = chatSystemPrompt({ provider_name: this._aiProvider.name });
+
     this._aiProvider.modelChange.connect(() => {
       this._errorMessage = this._aiProvider.chatError;
+      this._prompt = chatSystemPrompt({ provider_name: this._aiProvider.name });
     });
   }
 
   get provider(): BaseChatModel | null {
     return this._aiProvider.chatModel;
   }
 
+  /**
+   * Getter and setter for the initial prompt.
+   */
+  get prompt(): string {
+    return this._prompt;
+  }
+  set prompt(value: string) {
+    this._prompt = value;
+  }
+
   async sendMessage(message: INewMessage): Promise<boolean> {
     message.id = UUID.uuid4();
     const msg: IChatMessage = {
@@ -62,8 +77,9 @@
 
     this._history.messages.push(msg);
 
-    const messages = mergeMessageRuns(
-      this._history.messages.map(msg => {
+    const messages = mergeMessageRuns([new SystemMessage(this._prompt)]);
+    messages.push(
+      ...this._history.messages.map(msg => {
         if (msg.sender.username === 'User') {
           return new HumanMessage(msg.body);
         }
@@ -117,6 +133,7 @@
   }
 
   private _aiProvider: IAIProvider;
+  private _prompt: string;
   private _errorMessage: string = '';
   private _history: IChatHistory = { messages: [] };
   private _defaultErrorMessage = 'AI provider not configured';
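
The new `prompt` accessor decouples the system message from `sendMessage`: the handler seeds it from `chatSystemPrompt` and prepends it as a `SystemMessage` on every send. A minimal usage sketch, assuming an already-wired handler (the options object is hypothetical; only the accessor comes from this commit):

```ts
import { ChatHandler } from './chat-handler';

// Hypothetical wiring: ChatHandler.IOptions also carries the base chat model
// options, so the full object is assumed to be built by the extension.
declare const options: ChatHandler.IOptions;
const handler = new ChatHandler(options);

// Seeded as chatSystemPrompt({ provider_name: <active provider> }).
console.log(handler.prompt);

// Replaces the SystemMessage prepended on the next sendMessage() call.
handler.prompt = 'You are a terse assistant. Answer in one sentence.';
```

Note that the constructor also re-derives the prompt inside the `modelChange` handler, so an override only lasts until the provider switches models.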

src/llm-models/anthropic-completer.ts

+13 -3
@@ -7,6 +7,7 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { AIMessage, SystemMessage } from '@langchain/core/messages';
 
 import { BaseCompleter, IBaseCompleter } from './base-completer';
+import { COMPLETION_SYSTEM_PROMPT } from '../provider';
 
 export class AnthropicCompleter implements IBaseCompleter {
   constructor(options: BaseCompleter.IOptions) {
@@ -17,6 +18,16 @@ export class AnthropicCompleter implements IBaseCompleter {
     return this._anthropicProvider;
   }
 
+  /**
+   * Getter and setter for the initial prompt.
+   */
+  get prompt(): string {
+    return this._prompt;
+  }
+  set prompt(value: string) {
+    this._prompt = value;
+  }
+
   async fetch(
     request: CompletionHandler.IRequest,
     context: IInlineCompletionContext
@@ -28,9 +39,7 @@ export class AnthropicCompleter implements IBaseCompleter {
     const trimmedPrompt = prompt.trim();
 
     const messages = [
-      new SystemMessage(
-        'You are a code-completion AI completing the following code from a Jupyter Notebook cell.'
-      ),
+      new SystemMessage(this._prompt),
       new AIMessage(trimmedPrompt)
     ];
 
@@ -62,4 +71,5 @@
   }
 
   private _anthropicProvider: ChatAnthropic;
+  private _prompt: string = COMPLETION_SYSTEM_PROMPT;
 }
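
Both completers now read their system message from the same writable property, so the wording can be adjusted at runtime without subclassing. A sketch under the assumption that `BaseCompleter.IOptions` carries the provider settings (construction is abbreviated; only the `prompt` accessor is from this commit):

```ts
import { BaseCompleter, IBaseCompleter } from './llm-models/base-completer';
import { AnthropicCompleter } from './llm-models/anthropic-completer';

// Hypothetical options object; its real shape is defined in base-completer.ts.
declare const options: BaseCompleter.IOptions;
const completer: IBaseCompleter = new AnthropicCompleter(options);

// Seeded with COMPLETION_SYSTEM_PROMPT (see src/provider.ts below) and
// extendable without touching the fetch() logic.
completer.prompt += '\nPrefer concise, idiomatic code.';
```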

src/llm-models/base-completer.ts

+5
@@ -11,6 +11,11 @@ export interface IBaseCompleter {
    */
   provider: BaseLanguageModel;
 
+  /**
+   * The completion prompt.
+   */
+  prompt: string;
+
   /**
    * The function to fetch a new completion.
    */
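
Because `prompt` is declared as a plain property, TypeScript accepts either a field or an accessor pair as an implementation; both built-in completers use the accessor form backed by a private field. A small illustration of that equivalence (both class names are hypothetical):

```ts
interface IHasPrompt {
  prompt: string;
}

// Satisfies the declaration with a plain field.
class FieldBacked implements IHasPrompt {
  prompt = 'default prompt';
}

// Satisfies the same declaration with a getter/setter pair, mirroring
// AnthropicCompleter and CodestralCompleter.
class AccessorBacked implements IHasPrompt {
  get prompt(): string {
    return this._prompt;
  }
  set prompt(value: string) {
    this._prompt = value;
  }
  private _prompt = 'default prompt';
}
```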

src/llm-models/codestral-completer.ts

+12
@@ -8,6 +8,7 @@ import { Throttler } from '@lumino/polling';
 import { CompletionRequest } from '@mistralai/mistralai';
 
 import { BaseCompleter, IBaseCompleter } from './base-completer';
+import { COMPLETION_SYSTEM_PROMPT } from '../provider';
 
 /**
  * The Mistral API has a rate limit of 1 request per second
@@ -66,6 +67,16 @@ export class CodestralCompleter implements IBaseCompleter {
     return this._mistralProvider;
   }
 
+  /**
+   * Getter and setter for the initial prompt.
+   */
+  get prompt(): string {
+    return this._prompt;
+  }
+  set prompt(value: string) {
+    this._prompt = value;
+  }
+
   set requestCompletion(value: () => void) {
     this._requestCompletion = value;
   }
@@ -109,5 +120,6 @@
   private _requestCompletion?: () => void;
   private _throttler: Throttler;
   private _mistralProvider: MistralAI;
+  private _prompt: string = COMPLETION_SYSTEM_PROMPT;
   private _currentData: CompletionRequest | null = null;
 }

src/provider.ts

+34
@@ -8,6 +8,30 @@ import { CompletionProvider } from './completion-provider';
 import { getChatModel, IBaseCompleter } from './llm-models';
 import { IAIProvider } from './token';
 
+export const chatSystemPrompt = (options: AIProvider.IPromptOptions) => `
+You are Jupyternaut, a conversational assistant living in JupyterLab to help users.
+You are not a language model, but rather an application built on a foundation model from ${options.provider_name}.
+You are talkative and you provide lots of specific details from the foundation model's context.
+You may use Markdown to format your response.
+If your response includes code, they must be enclosed in Markdown fenced code blocks (with triple backticks before and after).
+If your response includes mathematical notation, they must be expressed in LaTeX markup and enclosed in LaTeX delimiters.
+All dollar quantities (of USD) must be formatted in LaTeX, with the \`$\` symbol escaped by a single backslash \`\\\`.
+- Example prompt: \`If I have \\\\$100 and spend \\\\$20, how much money do I have left?\`
+- **Correct** response: \`You have \\(\\$80\\) remaining.\`
+- **Incorrect** response: \`You have $80 remaining.\`
+If you do not know the answer to a question, answer truthfully by responding that you do not know.
+The following is a friendly conversation between you and a human.
+`;
+
+export const COMPLETION_SYSTEM_PROMPT = `
+You are an application built to provide helpful code completion suggestions.
+You should only produce code. Keep comments to minimum, use the
+programming language comment syntax. Produce clean code.
+The code is written in JupyterLab, a data analysis and code development
+environment which can execute code extended with additional syntax for
+interactive features, such as magics.
+`;
+
 export class AIProvider implements IAIProvider {
   constructor(options: AIProvider.IOptions) {
     this._completionProvider = new CompletionProvider({
@@ -110,6 +134,16 @@ export namespace AIProvider {
     requestCompletion: () => void;
   }
 
+  /**
+   * The options for the Chat system prompt.
+   */
+  export interface IPromptOptions {
+    /**
+     * The provider name.
+     */
+    provider_name: string;
+  }
+
   /**
    * This function indicates whether a key is writable in an object.
    * https://stackoverflow.com/questions/54724875/can-we-check-whether-property-is-readonly-in-typescript
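
Since `chatSystemPrompt` interpolates the provider name at call time while `COMPLETION_SYSTEM_PROMPT` is a single shared constant, the two prompts are consumed differently; a quick sketch of the distinction:

```ts
import { chatSystemPrompt, COMPLETION_SYSTEM_PROMPT } from './provider';

// Rebuilt per call, which is why ChatHandler re-derives it on modelChange.
const prompt = chatSystemPrompt({ provider_name: 'Anthropic' });
console.log(prompt.includes('a foundation model from Anthropic')); // true

// One constant shared by all completers; per-completer overrides go through
// the new `prompt` setter instead.
console.log(COMPLETION_SYSTEM_PROMPT.includes('code completion suggestions')); // true
```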
