
Commit 38544c1

Merge pull request ex3ndr#43 from Sinan-Karakaya/main
FEAT: Added support for Bearer token in header for protected endpoints
2 parents d6aaa95 + f7bd142, commit 38544c1

File tree

8 files changed (+32 / -18 lines changed)

package.json

Lines changed: 5 additions & 0 deletions
@@ -76,6 +76,11 @@
       "description": "Ollama Server Endpoint. Empty for local instance. Example: http://192.168.0.100:11434",
       "order": 1
     },
+    "inference.bearerToken": {
+      "type": "string",
+      "default": "",
+      "description": "Auth Bearer token that should be used for secure requests. Leave empty if not desired."
+    },
     "inference.model": {
       "type": "string",
       "enum": [

src/config.ts

Lines changed: 2 additions & 0 deletions
@@ -15,6 +15,7 @@ class Config {
         if (endpoint === '') {
             endpoint = 'http://127.0.0.1:11434';
         }
+        let bearerToken = config.get('bearerToken') as string;

         // Load general paremeters
         let maxLines = config.get('maxLines') as number;
@@ -39,6 +40,7 @@ class Config {

         return {
             endpoint,
+            bearerToken,
             maxLines,
             maxTokens,
             temperature,
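For orientation (not part of the diff): config here is the extension's handle to its VS Code settings, so the new value is read from the same section as the other inference options and then exposed on the returned configuration object. A minimal standalone sketch of that lookup, assuming the section name is "inference" (as the "inference.bearerToken" key in package.json suggests); the helper name readBearerToken is illustrative only:

import * as vscode from 'vscode';

// Hedged sketch: read the token the way config.ts appears to, falling back to
// the empty-string default declared in package.json when the setting is unset.
function readBearerToken(): string {
    const config = vscode.workspace.getConfiguration('inference');
    return config.get<string>('bearerToken', '');
}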

src/modules/lineGenerator.ts

Lines changed: 10 additions & 8 deletions
@@ -1,14 +1,16 @@
-export async function* lineGenerator(url: string, data: any): AsyncGenerator<string> {
-
+export async function* lineGenerator(url: string, data: any, bearerToken: string): AsyncGenerator<string> {
     // Request
     const controller = new AbortController();
     let res = await fetch(url, {
-        method: 'POST',
-        body: JSON.stringify(data),
-        headers: {
-            "Content-Type": "application/json",
-        },
-        signal: controller.signal
+        method: 'POST',
+        body: JSON.stringify(data),
+        headers: bearerToken ? {
+            'Content-Type': 'application/json',
+            Authorization: `Bearer ${bearerToken}`,
+        } : {
+            'Content-Type': 'application/json',
+        },
+        signal: controller.signal,
     });
     if (!res.ok || !res.body) {
         throw Error('Unable to connect to backend');
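The change above uses a conditional headers object: the Authorization header is attached only when a token is configured, so requests against an unprotected local Ollama instance stay exactly as before. A standalone sketch of the same pattern, assuming nothing beyond standard TypeScript; the helper name buildHeaders is hypothetical and not part of this commit:

// Hedged sketch: build request headers, adding Authorization only when a
// bearer token is present, mirroring the ternary used in lineGenerator above.
function buildHeaders(bearerToken: string): Record<string, string> {
    const headers: Record<string, string> = { 'Content-Type': 'application/json' };
    if (bearerToken) {
        headers.Authorization = `Bearer ${bearerToken}`;
    }
    return headers;
}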

src/modules/ollamaCheckModel.ts

Lines changed: 6 additions & 3 deletions
@@ -1,9 +1,12 @@
 import { info } from "./log";

-export async function ollamaCheckModel(endpoint: string, model: string) {
-
+export async function ollamaCheckModel(endpoint: string, model: string, bearerToken: string) {
     // Check if exists
-    let res = await fetch(endpoint + '/api/tags');
+    let res = await fetch(endpoint + '/api/tags', {
+        headers: bearerToken ? {
+            Authorization: `Bearer ${bearerToken}`,
+        } : {},
+    });
     if (!res.ok) {
         info(await res.text());
         info(endpoint + '/api/tags');
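With this change the GET to /api/tags carries the same credential, which matters when the Ollama server sits behind an authenticating reverse proxy. A hedged usage sketch: the endpoint URL, model id, and token are placeholders, and it assumes ollamaCheckModel resolves to a truthy value when the model is present, as the modelExists assignment in provider.ts suggests:

import { ollamaCheckModel } from './ollamaCheckModel';

// Hedged usage sketch with placeholder values: probe a protected endpoint
// that expects "Authorization: Bearer <token>" on every request.
async function checkExample(): Promise<void> {
    const exists = await ollamaCheckModel(
        'https://ollama.example.com',   // placeholder endpoint behind a proxy
        'codellama:7b-code-q4_K_M',     // placeholder model id
        'my-secret-token',              // placeholder bearer token
    );
    console.log('model available:', exists);
}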

src/modules/ollamaDownloadModel.ts

Lines changed: 2 additions & 2 deletions
@@ -1,9 +1,9 @@
 import { lineGenerator } from "./lineGenerator";
 import { info } from "./log";

-export async function ollamaDownloadModel(endpoint: string, model: string) {
+export async function ollamaDownloadModel(endpoint: string, model: string, bearerToken: string) {
     info('Downloading model from ollama: ' + model);
-    for await (let line of lineGenerator(endpoint + '/api/pull', { name: model })) {
+    for await (let line of lineGenerator(endpoint + '/api/pull', { name: model }, bearerToken)) {
         info('[DOWNLOAD] ' + line);
     }
 }

src/modules/ollamaTokenGenerator.ts

Lines changed: 2 additions & 2 deletions
@@ -7,8 +7,8 @@ export type OllamaToken = {
     done: boolean
 };

-export async function* ollamaTokenGenerator(url: string, data: any): AsyncGenerator<OllamaToken> {
-    for await (let line of lineGenerator(url, data)) {
+export async function* ollamaTokenGenerator(url: string, data: any, bearerToken: string): AsyncGenerator<OllamaToken> {
+    for await (let line of lineGenerator(url, data, bearerToken)) {
         info('Receive line: ' + line);
         let parsed: OllamaToken;
         try {

src/prompts/autocomplete.ts

Lines changed: 2 additions & 1 deletion
@@ -5,6 +5,7 @@ import { ModelFormat, adaptPrompt } from './processors/models';

 export async function autocomplete(args: {
     endpoint: string,
+    bearerToken: string,
     model: string,
     format: ModelFormat,
     prefix: string,
@@ -33,7 +34,7 @@ export async function autocomplete(args: {
     let res = '';
     let totalLines = 1;
     let blockStack: ('[' | '(' | '{')[] = [];
-    outer: for await (let tokens of ollamaTokenGenerator(args.endpoint + '/api/generate', data)) {
+    outer: for await (let tokens of ollamaTokenGenerator(args.endpoint + '/api/generate', data, args.bearerToken)) {
         if (args.canceled && args.canceled()) {
             break;
         }

src/prompts/provider.ts

Lines changed: 3 additions & 2 deletions
@@ -122,7 +122,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
         try {

             // Check model exists
-            let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName);
+            let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
             if (token.isCancellationRequested) {
                 info(`Canceled after AI completion.`);
                 return;
@@ -147,7 +147,7 @@

                 // Perform download
                 this.update('sync~spin', 'Downloading');
-                await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName);
+                await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
                 this.update('sync~spin', 'Llama Coder')
             }
             if (token.isCancellationRequested) {
@@ -161,6 +161,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
                 prefix: prepared.prefix,
                 suffix: prepared.suffix,
                 endpoint: inferenceConfig.endpoint,
+                bearerToken: inferenceConfig.bearerToken,
                 model: inferenceConfig.modelName,
                 format: inferenceConfig.modelFormat,
                 maxLines: inferenceConfig.maxLines,
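Taken together, the commit threads the token from the inference.bearerToken setting through Config and PromptProvider into every request helper, so each fetch against a protected endpoint carries an Authorization: Bearer header. A small self-contained sketch for sanity-checking such an endpoint outside the extension, assuming a runtime with a global fetch (Node 18+ or a browser); the URL and token are placeholders:

// Hedged sketch, not part of the commit: confirm that a protected Ollama
// endpoint accepts the configured token before pointing the extension at it.
async function verifyEndpoint(endpoint: string, bearerToken: string): Promise<boolean> {
    const res = await fetch(endpoint + '/api/tags', {
        headers: bearerToken ? { Authorization: `Bearer ${bearerToken}` } : {},
    });
    return res.ok;
}

verifyEndpoint('https://ollama.example.com', 'my-secret-token')
    .then((ok) => console.log(ok ? 'token accepted' : 'request rejected'));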

0 commit comments
