Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 0dcffcb

Browse files
authored
Merge pull request ex3ndr#5 from wrapss/maxSettings
✨feat: Add maxLines, maxTokens and temperature settings
2 parents 9be091b + f7afdc8 commit 0dcffcb

File tree

4 files changed

+34
-4
lines changed

4 files changed

+34
-4
lines changed

‎package.json‎

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,21 @@
4141
"default":"http://127.0.0.1:11434/",
4242
"description":"Ollama Server Endpoint"
4343
},
44+
"inference.maxLines": {
45+
"type":"number",
46+
"default":16,
47+
"description":"Max number of lines to be keep."
48+
},
49+
"inference.maxTokens": {
50+
"type":"number",
51+
"default":256,
52+
"description":"Max number of new tokens to be generated."
53+
},
54+
"inference.temperature": {
55+
"type":"number",
56+
"default":0.2,
57+
"description":"Temperature of the model. Increasing the temperature will make the model answer more creatively."
58+
},
4459
"inference.model": {
4560
"type":"string",
4661
"enum": [

‎src/prompts/autocomplete.ts‎

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,9 @@ export async function autocomplete(args: {
88
model:string,
99
prefix:string,
1010
suffix:string,
11+
maxLines:number,
12+
maxTokens:number,
13+
temperature:number,
1114
canceled?:()=>boolean,
1215
}):Promise<string>{
1316

@@ -17,7 +20,8 @@ export async function autocomplete(args: {
1720
prompt:adaptPrompt({prefix:args.prefix,suffix:args.suffix,model:args.model}),
1821
raw:true,
1922
options:{
20-
num_predict:256
23+
num_predict:args.maxTokens,
24+
temperature:args.temperature
2125
}
2226
};
2327

@@ -75,9 +79,8 @@ export async function autocomplete(args: {
7579

7680
// Update total lines
7781
totalLines+=countSymbol(tokens.response,'\n');
78-
7982
// Break if too many lines and on top level
80-
if(totalLines>16&&blockStack.length===0){
83+
if(totalLines>args.maxLines&&blockStack.length===0){
8184
info('Too many lines, breaking.');
8285
break;
8386
}

‎src/prompts/provider.ts‎

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,9 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
6565
letconfig=vscode.workspace.getConfiguration('inference');
6666
letendpoint=config.get('endpoint')asstring;
6767
letmodel=config.get('model')asstring;
68+
letmaxLines=config.get('maxLines')asnumber;
69+
letmaxTokens=config.get('maxTokens')asnumber;
70+
lettemperature=config.get('temperature')asnumber;
6871
if(endpoint.endsWith('/')){
6972
endpoint=endpoint.slice(0,endpoint.length-1);
7073
}
@@ -98,6 +101,9 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
98101
suffix:prepared.suffix,
99102
endpoint:endpoint,
100103
model:model,
104+
maxLines:maxLines,
105+
maxTokens:maxTokens,
106+
temperature,
101107
canceled:()=>token.isCancellationRequested,
102108
});
103109
info(`AI completion completed:${res}`);

‎src/test/suite/extension.test.ts‎

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,12 +17,18 @@ suite('Extension Test Suite', () => {
1717
test('should perform autocomplete',async()=>{
1818
letendpoint='http://127.0.0.1:11434';
1919
letmodel='codellama:7b-code-q4_K_S';// Lightweight llm for tests
20+
letmaxLines=16;
21+
letmaxTokens=256;
22+
lettemperature=0.2;
2023
letprompt='fun main(): ';
2124
letresult=awaitautocomplete({
2225
endpoint,
2326
model,
2427
prefix:prompt,
25-
suffix:''
28+
suffix:'',
29+
maxLines,
30+
maxTokens,
31+
temperature
2632
});
2733
console.warn(result);
2834
});

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp