From d79a6556a181f1f883916288b34c0f8c2fc645e9 Mon Sep 17 00:00:00 2001
From: Martin Evans
Date: Wed, 6 Sep 2023 01:20:36 +0100
Subject: [PATCH 1/3] Removed 3 unused properties of `InferenceParams`

---
 LLama/Abstractions/IInferenceParams.cs | 15 ---------------
 LLama/Common/InferenceParams.cs        | 13 +------------
 2 files changed, 1 insertion(+), 27 deletions(-)

diff --git a/LLama/Abstractions/IInferenceParams.cs b/LLama/Abstractions/IInferenceParams.cs
index e576366f5..08856ce3d 100644
--- a/LLama/Abstractions/IInferenceParams.cs
+++ b/LLama/Abstractions/IInferenceParams.cs
@@ -31,21 +31,6 @@ public interface IInferenceParams
     /// </summary>
     public IEnumerable<string> AntiPrompts { get; set; }
 
-    /// <summary>
-    /// path to file for saving/loading model eval state
-    /// </summary>
-    public string PathSession { get; set; }
-
-    /// <summary>
-    /// string to suffix user inputs with
-    /// </summary>
-    public string InputSuffix { get; set; }
-
-    /// <summary>
-    /// string to prefix user inputs with
-    /// </summary>
-    public string InputPrefix { get; set; }
-
     /// <summary>
     /// 0 or lower to use vocab size
     /// </summary>
diff --git a/LLama/Common/InferenceParams.cs b/LLama/Common/InferenceParams.cs
index 64d2652b1..baa78da11 100644
--- a/LLama/Common/InferenceParams.cs
+++ b/LLama/Common/InferenceParams.cs
@@ -29,18 +29,7 @@ public class InferenceParams : IInferenceParams
     /// Sequences where the model will stop generating further tokens.
     /// </summary>
     public IEnumerable<string> AntiPrompts { get; set; } = Array.Empty<string>();
-    /// <summary>
-    /// path to file for saving/loading model eval state
-    /// </summary>
-    public string PathSession { get; set; } = string.Empty;
-    /// <summary>
-    /// string to suffix user inputs with
-    /// </summary>
-    public string InputSuffix { get; set; } = string.Empty;
-    /// <summary>
-    /// string to prefix user inputs with
-    /// </summary>
-    public string InputPrefix { get; set; } = string.Empty;
+
     /// <summary>
     /// 0 or lower to use vocab size
     /// </summary>

From a1b0349561b47e46fc041b843327a5574956732f Mon Sep 17 00:00:00 2001
From: Martin Evans
Date: Sat, 9 Sep 2023 14:18:50 +0100
Subject: [PATCH 2/3] Removed `ModelAlias` property (unused)

---
 LLama/Abstractions/IModelParams.cs | 5 -----
 LLama/Common/ModelParams.cs        | 4 ----
 2 files changed, 9 deletions(-)

diff --git a/LLama/Abstractions/IModelParams.cs b/LLama/Abstractions/IModelParams.cs
index 700d98e2a..a8b87accb 100644
--- a/LLama/Abstractions/IModelParams.cs
+++ b/LLama/Abstractions/IModelParams.cs
@@ -57,11 +57,6 @@ public interface IModelParams
     /// </summary>
     string ModelPath { get; set; }
 
-    /// <summary>
-    /// model alias
-    /// </summary>
-    string ModelAlias { get; set; }
-
     /// <summary>
     /// lora adapter path (lora_adapter)
     /// </summary>
diff --git a/LLama/Common/ModelParams.cs b/LLama/Common/ModelParams.cs
index e0b0c264b..f5c98fae9 100644
--- a/LLama/Common/ModelParams.cs
+++ b/LLama/Common/ModelParams.cs
@@ -53,10 +53,6 @@ public record ModelParams
     /// </summary>
     public string ModelPath { get; set; }
     /// <summary>
-    /// model alias
-    /// </summary>
-    public string ModelAlias { get; set; } = "unknown";
-    /// <summary>
     /// lora adapter path (lora_adapter)
     /// </summary>
     public string LoraAdapter { get; set; } = string.Empty;

From b47977300a659dbae89b418735e2ddaec2f7154c Mon Sep 17 00:00:00 2001
From: Martin Evans
Date: Sat, 9 Sep 2023 14:57:47 +0100
Subject: [PATCH 3/3] Removed one more unused parameter

---
 LLama/Abstractions/IModelParams.cs | 5 -----
 LLama/Common/ModelParams.cs        | 9 +--------
 2 files changed, 1 insertion(+), 13 deletions(-)

diff --git a/LLama/Abstractions/IModelParams.cs b/LLama/Abstractions/IModelParams.cs
index a8b87accb..ad0608d75 100644
--- a/LLama/Abstractions/IModelParams.cs
+++ b/LLama/Abstractions/IModelParams.cs
@@ -77,11 +77,6 @@ public interface IModelParams
     /// </summary>
     int BatchSize { get; set; }
-    /// <summary>
-    /// Whether to convert eos to newline during the inference.
-    /// </summary>
-    bool ConvertEosToNewLine { get; set; }
-
     /// <summary>
     /// Whether to use embedding mode. (embedding) Note that if this is set to true,
     /// The LLamaModel won't produce text response anymore.
     /// </summary>
diff --git a/LLama/Common/ModelParams.cs b/LLama/Common/ModelParams.cs
index f5c98fae9..1ce18dd8a 100644
--- a/LLama/Common/ModelParams.cs
+++ b/LLama/Common/ModelParams.cs
@@ -69,11 +69,6 @@ public record ModelParams
     /// </summary>
     public int BatchSize { get; set; } = 512;
-    /// <summary>
-    /// Whether to convert eos to newline during the inference.
-    /// </summary>
-    public bool ConvertEosToNewLine { get; set; } = false;
-
     /// <summary>
     /// Whether to use embedding mode. (embedding) Note that if this is set to true,
     /// The LLamaModel won't produce text response anymore.
     /// </summary>
@@ -137,7 +132,6 @@ private ModelParams()
     /// <param name="loraBase">Base model path for the lora adapter (lora_base)</param>
     /// <param name="threads">Number of threads (-1 = autodetect) (n_threads)</param>
     /// <param name="batchSize">Batch size for prompt processing (must be >=32 to use BLAS) (n_batch)</param>
-    /// <param name="convertEosToNewLine">Whether to convert eos to newline during the inference.</param>
     /// <param name="embeddingMode">Whether to use embedding mode. (embedding) Note that if this is set to true, The LLamaModel won't produce text response anymore.</param>
     /// <param name="ropeFrequencyBase">RoPE base frequency.</param>
     /// <param name="ropeFrequencyScale">RoPE frequency scaling factor</param>
@@ -148,7 +142,7 @@ public ModelParams(string modelPath, int contextSize = 512, int gpuLayerCount =
         int seed = 1337, bool useFp16Memory = true,
         bool useMemorymap = true, bool useMemoryLock = false, bool perplexity = false,
         string loraAdapter = "", string loraBase = "", int threads = -1, int batchSize = 512,
-        bool convertEosToNewLine = false, bool embeddingMode = false,
+        bool embeddingMode = false,
         float ropeFrequencyBase = 10000.0f, float ropeFrequencyScale = 1f, bool mulMatQ = false,
         string encoding = "UTF-8")
     {
@@ -164,7 +158,6 @@ public ModelParams(string modelPath, int contextSize = 512, int gpuLayerCount =
         LoraBase = loraBase;
         Threads = threads == -1 ? Math.Max(Environment.ProcessorCount / 2, 1) : threads;
         BatchSize = batchSize;
-        ConvertEosToNewLine = convertEosToNewLine;
         EmbeddingMode = embeddingMode;
         RopeFrequencyBase = ropeFrequencyBase;
         RopeFrequencyScale = ropeFrequencyScale;