Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 9901581

Browse files
author
Sinan Karakaya
committed
feat: added Bearer token support
1 parent 996ac71 · commit 9901581

File tree

8 files changed

+35
-19
lines changed

8 files changed

+35
-19
lines changed

‎package.json‎

Lines changed: 5 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -58,6 +58,11 @@
5858
"description":"Ollama Server Endpoint. Empty for local instance. Example: http://192.168.0.100:11434",
5959
"order":1
6060
},
61+
"inference.bearerToken": {
62+
"type":"string",
63+
"default":"",
64+
"description":"Auth Bearer token that should be used for secure requests. Leave empty if not desired."
65+
},
6166
"inference.model": {
6267
"type":"string",
6368
"enum": [

‎src/config.ts‎

Lines changed: 2 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -15,6 +15,7 @@ class Config {
1515
if(endpoint===''){
1616
endpoint='http://127.0.0.1:11434';
1717
}
18+
letbearerToken=config.get('bearerToken')asstring;
1819

1920
// Load general paremeters
2021
letmaxLines=config.get('maxLines')asnumber;
@@ -39,6 +40,7 @@ class Config {
3940

4041
return{
4142
endpoint,
43+
bearerToken,
4244
maxLines,
4345
maxTokens,
4446
temperature,

‎src/modules/lineGenerator.ts‎

Lines changed: 11 additions & 9 deletions
Original file line number · Diff line number · Diff line change
@@ -1,15 +1,17 @@
1-
exportasyncfunction*lineGenerator(url:string,data:any):AsyncGenerator<string>{
2-
1+
exportasyncfunction*lineGenerator(url:string,data:any,authToken:string):AsyncGenerator<string>{
32
// Request
43
constcontroller=newAbortController();
54
letres=awaitfetch(url,{
6-
method:'POST',
7-
body:JSON.stringify(data),
8-
headers:{
9-
"Content-Type":"application/json",
10-
},
11-
signal:controller.signal
12-
});
5+
method:'POST',
6+
body:JSON.stringify(data),
7+
headers:authToken ?{
8+
'Content-Type':'application/json',
9+
Authorization:`Bearer${authToken}`,
10+
} :{
11+
'Content-Type':'application/json',
12+
},
13+
signal:controller.signal,
14+
})
1315
if(!res.ok||!res.body){
1416
throwError('Unable to connect to backend');
1517
}

‎src/modules/ollamaCheckModel.ts‎

Lines changed: 8 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -1,9 +1,14 @@
11
import{info}from"./log";
22

3-
exportasyncfunctionollamaCheckModel(endpoint:string,model:string){
4-
3+
exportasyncfunctionollamaCheckModel(endpoint:string,model:string,authToken:string){
54
// Check if exists
6-
letres=awaitfetch(endpoint+'/api/tags');
5+
letres=awaitfetch(endpoint+'/api/tags',{
6+
headers:authToken
7+
?{
8+
Authorization:`Bearer${authToken}`,
9+
}
10+
:{},
11+
});
712
if(!res.ok){
813
info(awaitres.text());
914
info(endpoint+'/api/tags');

‎src/modules/ollamaDownloadModel.ts‎

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -1,9 +1,9 @@
11
import{lineGenerator}from"./lineGenerator";
22
import{info}from"./log";
33

4-
exportasyncfunctionollamaDownloadModel(endpoint:string,model:string){
4+
exportasyncfunctionollamaDownloadModel(endpoint:string,model:string,authToken:string){
55
info('Downloading model from ollama: '+model);
6-
forawait(letlineoflineGenerator(endpoint+'/api/pull',{name:model})){
6+
forawait(letlineoflineGenerator(endpoint+'/api/pull',{name:model},authToken)){
77
info('[DOWNLOAD] '+line);
88
}
99
}

‎src/modules/ollamaTokenGenerator.ts‎

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -7,8 +7,8 @@ export type OllamaToken = {
77
done:boolean
88
};
99

10-
exportasyncfunction*ollamaTokenGenerator(url:string,data:any):AsyncGenerator<OllamaToken>{
11-
forawait(letlineoflineGenerator(url,data)){
10+
exportasyncfunction*ollamaTokenGenerator(url:string,data:any,authToken:string):AsyncGenerator<OllamaToken>{
11+
forawait(letlineoflineGenerator(url,data,authToken)){
1212
info('Receive line: '+line);
1313
letparsed:OllamaToken;
1414
try{

‎src/prompts/autocomplete.ts‎

Lines changed: 2 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -5,6 +5,7 @@ import { ModelFormat, adaptPrompt } from './processors/models';
55

66
exportasyncfunctionautocomplete(args:{
77
endpoint:string,
8+
bearerToken:string,
89
model:string,
910
format:ModelFormat,
1011
prefix:string,
@@ -33,7 +34,7 @@ export async function autocomplete(args: {
3334
letres='';
3435
lettotalLines=1;
3536
letblockStack:('['|'('|'{')[]=[];
36-
outer:forawait(lettokensofollamaTokenGenerator(args.endpoint+'/api/generate',data)){
37+
outer:forawait(lettokensofollamaTokenGenerator(args.endpoint+'/api/generate',data,args.bearerToken)){
3738
if(args.canceled&&args.canceled()){
3839
break;
3940
}

‎src/prompts/provider.ts‎

Lines changed: 3 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -86,7 +86,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
8686
try{
8787

8888
// Check model exists
89-
letmodelExists=awaitollamaCheckModel(inferenceConfig.endpoint,inferenceConfig.modelName);
89+
letmodelExists=awaitollamaCheckModel(inferenceConfig.endpoint,inferenceConfig.modelName,inferenceConfig.bearerToken);
9090
if(token.isCancellationRequested){
9191
info(`Canceled after AI completion.`);
9292
return;
@@ -111,7 +111,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
111111

112112
// Perform download
113113
this.statusbar.text=`$(sync~spin) Downloading`;
114-
awaitollamaDownloadModel(inferenceConfig.endpoint,inferenceConfig.modelName);
114+
awaitollamaDownloadModel(inferenceConfig.endpoint,inferenceConfig.modelName,inferenceConfig.bearerToken);
115115
this.statusbar.text=`$(sync~spin) Llama Coder`;
116116
}
117117
if(token.isCancellationRequested){
@@ -125,6 +125,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
125125
prefix:prepared.prefix,
126126
suffix:prepared.suffix,
127127
endpoint:inferenceConfig.endpoint,
128+
bearerToken:inferenceConfig.bearerToken,
128129
model:inferenceConfig.modelName,
129130
format:inferenceConfig.modelFormat,
130131
maxLines:inferenceConfig.maxLines,

0 commit comments

Comments (0)

[8]ページ先頭

©2009-2025 Movatter.jp