@@ -122,7 +122,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
         try {
 
             // Check model exists
-            let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName);
+            let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
             if (token.isCancellationRequested) {
                 info(`Canceled after AI completion.`);
                 return;
@@ -147,7 +147,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
 
                 // Perform download
                 this.update('sync~spin', 'Downloading');
-                await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName);
+                await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
                 this.update('sync~spin', 'Llama Coder')
             }
             if (token.isCancellationRequested) {
@@ -161,6 +161,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
                 prefix: prepared.prefix,
                 suffix: prepared.suffix,
                 endpoint: inferenceConfig.endpoint,
+                bearerToken: inferenceConfig.bearerToken,
                 model: inferenceConfig.modelName,
                 format: inferenceConfig.modelFormat,
                 maxLines: inferenceConfig.maxLines,
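
Note for reviewers: this diff only threads `inferenceConfig.bearerToken` through the three call sites; the Ollama helpers themselves are expected to turn it into an `Authorization: Bearer ...` header. Below is a minimal sketch of how `ollamaCheckModel` might consume the new parameter, assuming the helper queries Ollama's `/api/tags` endpoint with `fetch`; the route, the response shape, and the optional-header handling are assumptions for illustration, not code from this PR.

```typescript
// Hypothetical sketch of an Ollama helper that accepts the new bearerToken
// parameter. Route and response shape assume Ollama's public /api/tags API.
async function ollamaCheckModel(endpoint: string, model: string, bearerToken?: string): Promise<boolean> {
    // Attach the token as a standard Authorization header only when one is
    // configured, so unauthenticated local endpoints keep working unchanged.
    const headers: Record<string, string> = bearerToken
        ? { Authorization: `Bearer ${bearerToken}` }
        : {};
    const res = await fetch(`${endpoint}/api/tags`, { headers });
    if (!res.ok) {
        throw new Error(`Ollama request failed with status ${res.status}`);
    }
    const body = await res.json() as { models?: { name: string }[] };
    return (body.models ?? []).some(m => m.name === model);
}
```

Making the token optional (`bearerToken?: string`) keeps the change backward compatible: existing setups that talk to a local Ollama instance without a reverse proxy pass no token and send no `Authorization` header at all.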