Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 9be091b

Browse files
committed
wip: adding deep-seek models
1 parent 74d2a83 · commit 9be091b

File tree

5 files changed

+128
-101
lines changed

5 files changed

+128
-101
lines changed

‎package.json‎

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,15 @@
5454
"codellama:13b-code-fp16",
5555
"codellama:34b-code-q4_K_S",
5656
"codellama:34b-code-q4_K_M",
57-
"codellama:34b-code-q6_K"
57+
"codellama:34b-code-q6_K",
58+
"deepseek-coder:6.7b-instruct-q4_K_S",
59+
"deepseek-coder:6.7b-instruct-q4_K_M",
60+
"deepseek-coder:6.7b-instruct-q8_0",
61+
"deepseek-coder:6.7b-instruct-fp16",
62+
"deepseek-coder:33b-instruct-q4_K_S",
63+
"deepseek-coder:33b-instruct-q4_K_M",
64+
"deepseek-coder:33b-instruct-q8_0",
65+
"deepseek-coder:33b-instruct-fp16"
5866
],
5967
"default":"codellama:7b-code-q4_K_M",
6068
"description":"Inference model to use"

‎src/modules/log.ts‎

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,24 @@
11
importtypevscodefrom'vscode';
22

33
typeLogger={
4-
info(msg:string):void;
4+
info(message:string, ...args:any[]):void;
5+
warn(message:string, ...args:any[]):void;
56
};
67

78
letlogger:Logger|null=null;
89

910
exportfunctionregisterLogger(channel:vscode.LogOutputChannel){
10-
logger={
11-
info(msg:string){
12-
channel.appendLine(msg);
13-
}
14-
};
11+
logger=channel;
1512
}
1613

17-
exportfunctioninfo(src:string){
14+
exportfunctioninfo(message:string, ...args:any[]){
1815
if(logger){
19-
logger.info(src);
16+
logger.info(message, ...args);
17+
}
18+
}
19+
20+
exportfunctionwarn(message:string, ...args:any[]){
21+
if(logger){
22+
logger.warn(message, ...args);
2023
}
2124
}
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
exportfunctionadaptPrompt(args:{model:string,prefix:string,suffix:string}):string{
2+
3+
// DeepSeek Coder FIM format (comment previously said "Starcoder", but these are DeepSeek's fill-in-the-middle sentinel tokens)
4+
if(args.model.startsWith('deepseek-coder')){
5+
return`<|fim▁begin|>${args.prefix}<|fim▁hole|>${args.suffix}<|fim▁end|>`;
6+
}
7+
8+
// Codellama format
9+
return`<PRE>${args.prefix} <SUF>${args.suffix} <MID>`;
10+
}

‎src/prompts/autocomplete.ts‎

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import{ollamaTokenGenerator}from'../modules/ollamaTokenGenerator';
22
import{countSymbol}from'../modules/text';
33
import{info}from'../modules/log';
4+
import{adaptPrompt}from'./adaptors/adaptPrompt';
45

56
exportasyncfunctionautocomplete(args:{
67
endpoint:string,
@@ -13,7 +14,7 @@ export async function autocomplete(args: {
1314
// Calculate arguments
1415
letdata={
1516
model:args.model,
16-
prompt:`<PRE>${args.prefix} <SUF>${args.suffix} <MID>`,// Codellama format
17+
prompt:adaptPrompt({prefix:args.prefix,suffix:args.suffix,model:args.model}),
1718
raw:true,
1819
options:{
1920
num_predict:256

‎src/prompts/provider.ts‎

Lines changed: 96 additions & 91 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
importvscodefrom'vscode';
2-
import{info}from'../modules/log';
2+
import{info,warn}from'../modules/log';
33
import{autocomplete}from'./autocomplete';
44
import{preparePrompt}from'./preparePrompt';
55
import{AsyncLock}from'../modules/lock';
@@ -19,116 +19,121 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
1919

2020
asyncprovideInlineCompletionItems(document:vscode.TextDocument,position:vscode.Position,context:vscode.InlineCompletionContext,token:vscode.CancellationToken):Promise<vscode.InlineCompletionItem[]|vscode.InlineCompletionList|undefined|null>{
2121

22-
// Ignore unsupported documents
23-
if(!isSupported(document)){
24-
info(`Unsupported document:${document.uri.toString()} ignored.`);
25-
return;
26-
}
27-
28-
// Ignore if not needed
29-
if(isNotNeeded(document,position)){
30-
info('No inline completion required');
31-
return;
32-
}
22+
try{
3323

34-
// Ignoreif already canceled
35-
if(token.isCancellationRequested){
36-
info(`Canceled before AI completion.`);
37-
return;
38-
}
24+
// Ignoreunsupported documents
25+
if(!isSupported(document)){
26+
info(`Unsupported document:${document.uri.toString()} ignored.`);
27+
return;
28+
}
3929

40-
// Execute in lock
41-
returnawaitthis.lock.inLock(async()=>{
30+
// Ignore if not needed
31+
if(isNotNeeded(document,position)){
32+
info('No inline completion required');
33+
return;
34+
}
4235

43-
// Prepare context
44-
letprepared=awaitpreparePrompt(document,position,context);
36+
// Ignore if already canceled
4537
if(token.isCancellationRequested){
4638
info(`Canceled before AI completion.`);
4739
return;
4840
}
4941

50-
//Result
51-
letres:string|null=null;
42+
//Execute in lock
43+
returnawaitthis.lock.inLock(async()=>{
5244

53-
// Check if in cache
54-
letcached=getFromPromptCache({
55-
prefix:prepared.prefix,
56-
suffix:prepared.suffix
57-
});
45+
// Prepare context
46+
letprepared=awaitpreparePrompt(document,position,context);
47+
if(token.isCancellationRequested){
48+
info(`Canceled before AI completion.`);
49+
return;
50+
}
5851

59-
// If not cached
60-
if(cached===undefined){
52+
// Result
53+
letres:string|null=null;
6154

62-
// Config
63-
letconfig=vscode.workspace.getConfiguration('inference');
64-
letendpoint=config.get('endpoint')asstring;
65-
letmodel=config.get('model')asstring;
66-
if(endpoint.endsWith('/')){
67-
endpoint=endpoint.slice(0,endpoint.length-1);
68-
}
55+
// Check if in cache
56+
letcached=getFromPromptCache({
57+
prefix:prepared.prefix,
58+
suffix:prepared.suffix
59+
});
6960

70-
// Update status
71-
this.statusbar.text=`$(sync~spin) Llama Coder`;
72-
try{
61+
// If not cached
62+
if(cached===undefined){
7363

74-
// Check model exists
75-
letmodelExists=awaitollamaCheckModel(endpoint,model);
76-
if(token.isCancellationRequested){
77-
info(`Canceled after AI completion.`);
78-
return;
64+
// Config
65+
letconfig=vscode.workspace.getConfiguration('inference');
66+
letendpoint=config.get('endpoint')asstring;
67+
letmodel=config.get('model')asstring;
68+
if(endpoint.endsWith('/')){
69+
endpoint=endpoint.slice(0,endpoint.length-1);
7970
}
8071

81-
// Download model if not exists
82-
if(!modelExists){
83-
this.statusbar.text=`$(sync~spin) Downloading`;
84-
awaitollamaDownloadModel(endpoint,model);
85-
this.statusbar.text=`$(sync~spin) Llama Coder`;
72+
// Update status
73+
this.statusbar.text=`$(sync~spin) Llama Coder`;
74+
try{
75+
76+
// Check model exists
77+
letmodelExists=awaitollamaCheckModel(endpoint,model);
78+
if(token.isCancellationRequested){
79+
info(`Canceled after AI completion.`);
80+
return;
81+
}
82+
83+
// Download model if not exists
84+
if(!modelExists){
85+
this.statusbar.text=`$(sync~spin) Downloading`;
86+
awaitollamaDownloadModel(endpoint,model);
87+
this.statusbar.text=`$(sync~spin) Llama Coder`;
88+
}
89+
if(token.isCancellationRequested){
90+
info(`Canceled after AI completion.`);
91+
return;
92+
}
93+
94+
// Run AI completion
95+
info(`Running AI completion...`);
96+
res=awaitautocomplete({
97+
prefix:prepared.prefix,
98+
suffix:prepared.suffix,
99+
endpoint:endpoint,
100+
model:model,
101+
canceled:()=>token.isCancellationRequested,
102+
});
103+
info(`AI completion completed:${res}`);
104+
105+
// Put to cache
106+
setPromptToCache({
107+
prefix:prepared.prefix,
108+
suffix:prepared.suffix,
109+
value:res
110+
});
111+
}finally{
112+
this.statusbar.text=`$(chip) Llama Coder`;
86113
}
87-
if(token.isCancellationRequested){
88-
info(`Canceled after AI completion.`);
89-
return;
114+
}else{
115+
if(cached!==null){
116+
res=cached;
90117
}
91-
92-
// Run AI completion
93-
info(`Running AI completion...`);
94-
res=awaitautocomplete({
95-
prefix:prepared.prefix,
96-
suffix:prepared.suffix,
97-
endpoint:endpoint,
98-
model:model,
99-
canceled:()=>token.isCancellationRequested,
100-
});
101-
info(`AI completion completed:${res}`);
102-
103-
// Put to cache
104-
setPromptToCache({
105-
prefix:prepared.prefix,
106-
suffix:prepared.suffix,
107-
value:res
108-
});
109-
}finally{
110-
this.statusbar.text=`$(chip) Llama Coder`;
111118
}
112-
}else{
113-
if(cached!==null){
114-
res=cached;
119+
if(token.isCancellationRequested){
120+
info(`Canceled after AI completion.`);
121+
return;
115122
}
116-
}
117-
if(token.isCancellationRequested){
118-
info(`Canceled after AI completion.`);
119-
return;
120-
}
121123

122-
// Return result
123-
if(res&&res.trim()!==''){
124-
return[{
125-
insertText:res,
126-
range:newvscode.Range(position,position),
127-
}];
128-
}
124+
// Return result
125+
if(res&&res.trim()!==''){
126+
return[{
127+
insertText:res,
128+
range:newvscode.Range(position,position),
129+
}];
130+
}
129131

130-
// Nothing to complete
131-
return;
132-
});
132+
// Nothing to complete
133+
return;
134+
});
135+
}catch(e){
136+
warn('Error during inference:',e);
137+
}
133138
}
134139
}

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp