From 3d81cc98c14e60170b203e410684b8be5043115f Mon Sep 17 00:00:00 2001
From: Kevsnz <15689588+Kevsnz@users.noreply.github.com>
Date: Tue, 6 Feb 2024 19:06:42 +0500
Subject: [PATCH] Added option to delay completion request

---
 package.json            |  8 ++++++++
 src/config.ts           |  5 ++++-
 src/prompts/provider.ts | 23 ++++++++++++++---------
 3 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/package.json b/package.json
index 9a8b19f..76731f4 100644
--- a/package.json
+++ b/package.json
@@ -109,6 +109,14 @@
           "default": 256,
           "description": "Max number of new tokens to be generated.",
           "order": 7
+        },
+        "inference.delay": {
+          "type": "number",
+          "default": 250,
+          "description": "Completion request delay in milliseconds (0 - no delay, -1 - no completions).",
+          "order": 8,
+          "minimum": -1,
+          "maximum": 5000
         }
       }
     }
diff --git a/src/config.ts b/src/config.ts
index bb10809..471384e 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -35,13 +35,16 @@ class Config {
             }
         }
 
+        let delay = config.get('delay') as number;
+
         return {
             endpoint,
             maxLines,
             maxTokens,
             temperature,
             modelName,
-            modelFormat
+            modelFormat,
+            delay
         };
     }
 
diff --git a/src/prompts/provider.ts b/src/prompts/provider.ts
index 0e03c95..2c78daf 100644
--- a/src/prompts/provider.ts
+++ b/src/prompts/provider.ts
@@ -20,7 +20,21 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
         this.context = context;
     }
 
+    async delayCompletion(delay: number, token: vscode.CancellationToken): Promise<boolean> {
+        if (config.inference.delay < 0) {
+            return false;
+        }
+        await new Promise(p => setTimeout(p, delay));
+        if (token.isCancellationRequested) {
+            return false;
+        }
+        return true;
+    }
+
     async provideInlineCompletionItems(document: vscode.TextDocument, position: vscode.Position, context: vscode.InlineCompletionContext, token: vscode.CancellationToken): Promise<vscode.InlineCompletionItem[] | vscode.InlineCompletionList | undefined> {
+        if (!await this.delayCompletion(config.inference.delay, token)) {
+            return;
+        }
 
         try {
 
@@ -66,15 +80,6 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
 
         // Config
         let inferenceConfig = config.inference;
-        // let config = vscode.workspace.getConfiguration('inference');
-        // let endpoint = config.get('endpoint') as string;
-        // let model = config.get('model') as string;
-        // let maxLines = config.get('maxLines') as number;
-        // let maxTokens = config.get('maxTokens') as number;
-        // let temperature = config.get('temperature') as number;
-        // if (endpoint.endsWith('/')) {
-        // endpoint = endpoint.slice(0, endpoint.length - 1);
-        // }
 
         // Update status
         this.statusbar.text = `$(sync~spin) Llama Coder`;