Commit f7bd142

Merge branch 'main' into main

2 parents: 84e2897 + d6aaa95

4 files changed: +75 additions, -10 deletions
(README.md, package.json, src/extension.ts, src/prompts/provider.ts)

README.md

Lines changed: 2 additions & 2 deletions
@@ -1,6 +1,6 @@
 # Llama Coder
 
-Llama Coder is a better and self-hosted Github Copilot replacement for VS Studio Code. Llama Coder uses [Ollama](https://ollama.ai) and codellama to provide autocomplete that runs on your hardware. Works best with Mac M1/M2/M3 or with RTX 4090.
+Llama Coder is a better and self-hosted Github Copilot replacement for [VS Code](https://github.com/microsoft/vscode). Llama Coder uses [Ollama](https://ollama.ai) and codellama to provide autocomplete that runs on your hardware. Works best with Mac M1/M2/M3 or with RTX 4090.
 
 [VS Code Plugin](https://marketplace.visualstudio.com/items?itemName=ex3ndr.llama-coder)
 
@@ -14,7 +14,7 @@ Llama Coder is a better and self-hosted Github Copilot replacement for VS Studio
 
 Minimum required RAM: 16GB is a minimum, more is better since even smallest model takes 5GB of RAM.
 The best way: dedicated machine with RTX 4090. Install [Ollama](https://ollama.ai) on this machine and configure endpoint in extension settings to offload to this machine.
-Second best way: run on MacBook M1/M2/M3 with enougth RAM (more == better, but 10gb extra would be enougth).
+Second best way: run on MacBook M1/M2/M3 with enough RAM (more == better, but 10gb extra would be enough).
 For windows notebooks: it runs good with decent GPU, but dedicated machine with a good GPU is recommended. Perfect if you have a dedicated gaming PC.
 
 ## Local Installation
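
The README's recommended setup offloads inference to a dedicated Ollama machine and points the extension's endpoint setting at it. As a minimal sketch of what that implies, the snippet below probes a remote Ollama server before configuring the extension; the host address is a placeholder, and the only API assumed is Ollama's stock HTTP interface (`GET /api/tags` on the default port 11434).

```typescript
// Connectivity check for a remote Ollama endpoint (sketch; the host is a
// placeholder, and 11434 is Ollama's default port).
const endpoint = 'http://192.168.1.50:11434';

async function checkOllama(): Promise<void> {
    // GET /api/tags lists the models installed on the Ollama server.
    const res = await fetch(`${endpoint}/api/tags`);
    if (!res.ok) {
        throw new Error(`Ollama not reachable: HTTP ${res.status}`);
    }
    const body = await res.json() as { models: { name: string }[] };
    console.log('Models on server:', body.models.map((m) => m.name));
}

checkOllama().catch((err) => console.error(err));
```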

package.json

Lines changed: 19 additions & 1 deletion
@@ -33,6 +33,24 @@
   "extensionKind": ["ui"],
   "main": "./out/extension.js",
   "contributes": {
+    "commands": [
+      {
+        "command": "llama.openSettings",
+        "title": "Llama Coder: Open Settings"
+      },
+      {
+        "command": "llama.pause",
+        "title": "Llama Coder: Pause"
+      },
+      {
+        "command": "llama.resume",
+        "title": "Llama Coder: Resume"
+      },
+      {
+        "command": "llama.toggle",
+        "title": "Llama Coder: Toggle"
+      }
+    ],
     "configuration": [
       {
         "title": "Llama coder",
@@ -119,7 +137,7 @@
             "codellama",
             "deepseek"
           ],
-          "default": "codellama",
+          "default": "stable-code",
           "description": "Custom model prompt format",
           "order": 5
         },
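
Entries under `contributes.commands` only declare Command Palette items; each command ID still needs a matching `registerCommand` handler at activation, which this commit adds in `src/extension.ts` below. A minimal sketch of that pairing (not the extension's actual code):

```typescript
import * as vscode from 'vscode';

export function activate(context: vscode.ExtensionContext) {
    // The ID passed here must match the "command" field declared in
    // package.json, or the palette entry will fail when invoked.
    context.subscriptions.push(
        vscode.commands.registerCommand('llama.toggle', () => {
            vscode.window.showInformationMessage('Llama Coder toggled');
        })
    );
}

// The same command can also be triggered programmatically or from a keybinding:
// await vscode.commands.executeCommand('llama.toggle');
```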

src/extension.ts

Lines changed: 14 additions & 3 deletions
@@ -9,12 +9,12 @@ export function activate(context: vscode.ExtensionContext) {
     info('Llama Coder is activated.');
 
     // Create status bar
-    const openSettings = 'llama.openSettings';
-    context.subscriptions.push(vscode.commands.registerCommand(openSettings, () => {
+    context.subscriptions.push(vscode.commands.registerCommand('llama.openSettings', () => {
         vscode.commands.executeCommand('workbench.action.openSettings', '@ext:ex3ndr.llama-coder');
     }));
+
     let statusBarItem = vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Right, 100);
-    statusBarItem.command = openSettings;
+    statusBarItem.command = 'llama.toggle';
     statusBarItem.text = `$(chip) Llama Coder`;
     statusBarItem.show();
     context.subscriptions.push(statusBarItem);
@@ -23,6 +23,17 @@ export function activate(context: vscode.ExtensionContext) {
     const provider = new PromptProvider(statusBarItem, context);
     let disposable = vscode.languages.registerInlineCompletionItemProvider({ pattern: '**', }, provider);
     context.subscriptions.push(disposable);
+
+    context.subscriptions.push(vscode.commands.registerCommand('llama.pause', () => {
+        provider.paused = true;
+    }));
+    context.subscriptions.push(vscode.commands.registerCommand('llama.resume', () => {
+        provider.paused = false;
+    }));
+    context.subscriptions.push(vscode.commands.registerCommand('llama.toggle', () => {
+        provider.paused = !provider.paused;
+    }));
+
 }
 
 export function deactivate() {
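
Because `statusBarItem.command` now points at `llama.toggle`, clicking the status bar item pauses or resumes completions; settings remain reachable through the `Llama Coder: Open Settings` palette entry. Note that `StatusBarItem.command` also accepts a full `vscode.Command` object rather than a bare ID, so an equivalent, more explicit binding could look like the sketch below (the title and tooltip strings are illustrative, not from this commit):

```typescript
import * as vscode from 'vscode';

// Hypothetical variant of the same binding: a Command object instead of a
// bare command ID. Both forms are accepted by StatusBarItem.command.
const item = vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Right, 100);
item.command = {
    command: 'llama.toggle',
    title: 'Llama Coder: Toggle',
    tooltip: 'Pause or resume inline completions',
};
item.text = `$(chip) Llama Coder`;
item.show();
```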

src/prompts/provider.ts

Lines changed: 40 additions & 4 deletions
@@ -9,16 +9,49 @@ import { ollamaCheckModel } from '../modules/ollamaCheckModel';
 import { ollamaDownloadModel } from '../modules/ollamaDownloadModel';
 import { config } from '../config';
 
+type Status = {
+    icon: string;
+    text: string;
+};
+
 export class PromptProvider implements vscode.InlineCompletionItemProvider {
 
     lock = new AsyncLock();
     statusbar: vscode.StatusBarItem;
     context: vscode.ExtensionContext;
+    private _paused: boolean = false;
+    private _status: Status = { icon: "chip", text: "Llama Coder" };
 
     constructor(statusbar: vscode.StatusBarItem, context: vscode.ExtensionContext) {
         this.statusbar = statusbar;
         this.context = context;
     }
+
+    public set paused(value: boolean) {
+        this._paused = value;
+        this.update();
+    }
+
+    public get paused(): boolean {
+        return this._paused;
+    }
+
+    private update(icon?: string, text?: string): void {
+        this._status.icon = icon ? icon : this._status.icon;
+        this._status.text = text ? text : this._status.text;
+
+        let statusText = '';
+        let statusTooltip = '';
+        if (this._paused) {
+            statusText = `$(sync-ignored) ${this._status.text}`;
+            statusTooltip = `${this._status.text} (Paused)`;
+        } else {
+            statusText = `$(${this._status.icon}) ${this._status.text}`;
+            statusTooltip = `${this._status.text}`;
+        }
+        this.statusbar.text = statusText;
+        this.statusbar.tooltip = statusTooltip;
+    }
 
     async delayCompletion(delay: number, token: vscode.CancellationToken): Promise<boolean> {
         if (config.inference.delay < 0) {
@@ -37,6 +70,9 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
         }
 
         try {
+            if (this.paused) {
+                return;
+            }
 
             // Ignore unsupported documents
             if (!isSupported(document)) {
@@ -82,7 +118,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
             let inferenceConfig = config.inference;
 
             // Update status
-            this.statusbar.text = `$(sync~spin) Llama Coder`;
+            this.update('sync~spin', 'Llama Coder');
             try {
 
                 // Check model exists
@@ -110,9 +146,9 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
                     }
 
                     // Perform download
-                    this.statusbar.text = `$(sync~spin) Downloading`;
+                    this.update('sync~spin', 'Downloading');
                     await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
-                    this.statusbar.text = `$(sync~spin) Llama Coder`;
+                    this.update('sync~spin', 'Llama Coder')
                 }
                 if (token.isCancellationRequested) {
                     info(`Canceled after AI completion.`);
@@ -142,7 +178,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
                         value: res
                     });
                 } finally {
-                    this.statusbar.text = `$(chip) Llama Coder`;
+                    this.update('chip', 'Llama Coder');
                 }
             } else {
                 if (cached !== null) {
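
The core of this change is a `paused` get/set pair whose setter repaints the status bar, plus an early `return` in the completion path while paused. Below is a self-contained sketch of that pattern, with a plain class standing in for `vscode.StatusBarItem` so it runs outside VS Code:

```typescript
type Status = { icon: string; text: string };

// Stand-in for vscode.StatusBarItem: just the two fields update() touches.
class FakeStatusBar {
    text = '';
    tooltip = '';
}

class Provider {
    private _paused = false;
    private _status: Status = { icon: 'chip', text: 'Llama Coder' };

    constructor(private statusbar: FakeStatusBar) { }

    public get paused(): boolean {
        return this._paused;
    }

    public set paused(value: boolean) {
        this._paused = value;
        this.update(); // every state flip repaints the status bar
    }

    private update(icon?: string, text?: string): void {
        // Remember the last icon/text so a bare update() keeps them.
        this._status.icon = icon ? icon : this._status.icon;
        this._status.text = text ? text : this._status.text;
        if (this._paused) {
            this.statusbar.text = `$(sync-ignored) ${this._status.text}`;
            this.statusbar.tooltip = `${this._status.text} (Paused)`;
        } else {
            this.statusbar.text = `$(${this._status.icon}) ${this._status.text}`;
            this.statusbar.tooltip = this._status.text;
        }
    }
}

const bar = new FakeStatusBar();
const provider = new Provider(bar);
provider.paused = true;
console.log(bar.text);  // "$(sync-ignored) Llama Coder"
provider.paused = false;
console.log(bar.text);  // "$(chip) Llama Coder"
```

One quirk carried over from the diff: `update()` treats falsy arguments as "keep the previous value" because it tests truthiness rather than `undefined`, so an empty string could never be set as the status text.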
