diff --git a/Elsa.SemanticKernel/Activities/SemanticKernel.cs b/Elsa.SemanticKernel/Activities/SemanticKernel.cs
index d4f65f62a..72f08c15a 100644
--- a/Elsa.SemanticKernel/Activities/SemanticKernel.cs
+++ b/Elsa.SemanticKernel/Activities/SemanticKernel.cs
@@ -53,6 +53,12 @@ public class SemanticKernelSkill : CodeActivity<string>
         DefaultValue = "ChatCompletion")]
     public Input<string> FunctionName { get; set; }
 
+/* [Input(
+    Description = "Mockup - don't actually call the AI, just output the prompts",
+    UIHint = InputUIHints.Checkbox,
+    DefaultValue = false)]
+    public Input<bool> Mockup { get; set; } */
+
     /// <inheritdoc/>
     protected override async ValueTask ExecuteAsync(ActivityExecutionContext workflowContext)
     {
@@ -62,56 +68,68 @@ public class SemanticKernelSkill : CodeActivity<string>
         var systemPrompt = SysPrompt.Get(workflowContext);
         var maxRetries = MaxRetries.Get(workflowContext);
         var prompt = Prompt.Get(workflowContext);
-        var kernelSettings = KernelSettings.LoadSettings();
-        var kernelConfig = new KernelConfig();
+        //var mockup = Mockup.Get(workflowContext);
+        var mockup = false;
 
-        using ILoggerFactory loggerFactory = LoggerFactory.Create(builder =>
+        string info = ($"#################\nSkill: {skillName}\nFunction: {functionName}\nPrompt: {prompt}\n#################\n\n");
+
+        if (mockup)
         {
-            builder
-                .SetMinimumLevel(kernelSettings.LogLevel ?? LogLevel.Warning);
-        });
-        /* var memoryStore = new QdrantMemoryStore(new QdrantVectorDbClient("http://qdrant", 1536, port: 6333));
-        var embedingGeneration = new AzureTextEmbeddingGeneration(kernelSettings.EmbeddingDeploymentOrModelId, kernelSettings.Endpoint, kernelSettings.ApiKey);
-        var semanticTextMemory = new SemanticTextMemory(memoryStore, embedingGeneration);
-        */
-        var kernel = new KernelBuilder()
-            .WithLogger(loggerFactory.CreateLogger<SemanticKernelSkill>())
-            .WithAzureChatCompletionService(kernelSettings.DeploymentOrModelId, kernelSettings.Endpoint, kernelSettings.ApiKey, true, kernelSettings.ServiceId, true)
-            //.WithMemory(semanticTextMemory)
-            .WithConfiguration(kernelConfig)
-            .Configure(c => c.SetDefaultHttpRetryConfig(new HttpRetryConfig
+            workflowContext.SetResult(info);
+        }
+        else
         {
-            MaxRetryCount = maxRetries,
-            UseExponentialBackoff = true,
-            // MinRetryDelay = TimeSpan.FromSeconds(2),
-            // MaxRetryDelay = TimeSpan.FromSeconds(8),
-            MaxTotalRetryTime = TimeSpan.FromSeconds(300),
-            // RetryableStatusCodes = new[] { HttpStatusCode.TooManyRequests, HttpStatusCode.RequestTimeout },
-            // RetryableExceptions = new[] { typeof(HttpRequestException) }
-            }))
-            .Build();
+            var kernelSettings = KernelSettings.LoadSettings();
+            var kernelConfig = new KernelConfig();
 
-/* var interestingMemories = kernel.Memory.SearchAsync("ImportedMemories", prompt, 2);
-        var wafContext = "Consider the following contextual snippets:";
-        await foreach (var memory in interestingMemories)
-        {
-            wafContext += $"\n {memory.Metadata.Text}";
-        } */
+            using ILoggerFactory loggerFactory = LoggerFactory.Create(builder =>
+            {
+                builder
+                    .SetMinimumLevel(kernelSettings.LogLevel ?? LogLevel.Warning);
+            });
+            /* var memoryStore = new QdrantMemoryStore(new QdrantVectorDbClient("http://qdrant", 1536, port: 6333));
+            var embedingGeneration = new AzureTextEmbeddingGeneration(kernelSettings.EmbeddingDeploymentOrModelId, kernelSettings.Endpoint, kernelSettings.ApiKey);
+            var semanticTextMemory = new SemanticTextMemory(memoryStore, embedingGeneration);
+            */
+            var kernel = new KernelBuilder()
+                .WithLogger(loggerFactory.CreateLogger<SemanticKernelSkill>())
+                .WithAzureChatCompletionService(kernelSettings.DeploymentOrModelId, kernelSettings.Endpoint, kernelSettings.ApiKey, true, kernelSettings.ServiceId, true)
+                //.WithMemory(semanticTextMemory)
+                .WithConfiguration(kernelConfig)
+                .Configure(c => c.SetDefaultHttpRetryConfig(new HttpRetryConfig
+                {
+                    MaxRetryCount = maxRetries,
+                    UseExponentialBackoff = true,
+                    // MinRetryDelay = TimeSpan.FromSeconds(2),
+                    // MaxRetryDelay = TimeSpan.FromSeconds(8),
+                    MaxTotalRetryTime = TimeSpan.FromSeconds(300),
+                    // RetryableStatusCodes = new[] { HttpStatusCode.TooManyRequests, HttpStatusCode.RequestTimeout },
+                    // RetryableExceptions = new[] { typeof(HttpRequestException) }
+                }))
+                .Build();
 
-        var skillConfig = SemanticFunctionConfig.ForSkillAndFunction(skillName, functionName);
-        var function = kernel.CreateSemanticFunction(skillConfig.PromptTemplate, skillConfig.Name, skillConfig.SkillName,
-            skillConfig.Description, skillConfig.MaxTokens, skillConfig.Temperature,
-            skillConfig.TopP, skillConfig.PPenalty, skillConfig.FPenalty);
+            /* var interestingMemories = kernel.Memory.SearchAsync("ImportedMemories", prompt, 2);
+            var wafContext = "Consider the following contextual snippets:";
+            await foreach (var memory in interestingMemories)
+            {
+                wafContext += $"\n {memory.Metadata.Text}";
+            } */
 
-        var context = new ContextVariables();
-        context.Set("input", prompt);
-        //context.Set("wafContext", wafContext);
+            var skillConfig = SemanticFunctionConfig.ForSkillAndFunction(skillName, functionName);
+            var function = kernel.CreateSemanticFunction(skillConfig.PromptTemplate, skillConfig.Name, skillConfig.SkillName,
+                skillConfig.Description, skillConfig.MaxTokens, skillConfig.Temperature,
+                skillConfig.TopP, skillConfig.PPenalty, skillConfig.FPenalty);
 
-        SKContext answer = await kernel.RunAsync(context, function).ConfigureAwait(false);
-        string result = answer.Result;
+            var context = new ContextVariables();
+            context.Set("input", prompt);
+            //context.Set("wafContext", wafContext);
 
-        //debug output to console
-        Console.WriteLine($"Skill: {skillName}\nFunction: {functionName}\nPrompt: {prompt}Answer: {result}");
-        workflowContext.SetResult(result);
+            SKContext answer = await kernel.RunAsync(context, function).ConfigureAwait(false);
+            string result = answer.Result;
+
+            Console.WriteLine(info);
+
+            workflowContext.SetResult(result);
+        }
     }
 }
\ No newline at end of file
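Reviewer note: the change above gates all kernel construction behind the mockup check, so a dry run can surface the rendered prompts without calling Azure OpenAI. The sketch below is a self-contained illustration of that gate pattern, not code from this PR; the names mirror the diff, and the callModel delegate is a hypothetical stand-in for the kernel invocation.

using System;
using System.Threading.Tasks;

// Standalone illustration of the mockup gate introduced in the diff above.
// Hypothetical demo code; none of it is part of this PR.
public static class MockupGateDemo
{
    // Returns the prompt header in mockup mode; otherwise runs the real call.
    public static async Task<string> RunAsync(
        bool mockup,
        string skillName,
        string functionName,
        string prompt,
        Func<Task<string>> callModel)
    {
        string info = $"#################\nSkill: {skillName}\nFunction: {functionName}\nPrompt: {prompt}\n#################\n\n";

        if (mockup)
        {
            // Dry run: surface the prompts without touching the model.
            return info;
        }

        string result = await callModel();
        Console.WriteLine(info); // debug header, as in the diff
        return result;
    }

    public static async Task Main()
    {
        // In mockup mode the model delegate must never be invoked.
        string dryRun = await RunAsync(
            mockup: true,
            skillName: "DevLead",
            functionName: "Plan",
            prompt: "Build a calculator app",
            callModel: () => throw new InvalidOperationException("no AI call in mockup mode"));

        Console.WriteLine(dryRun);
    }
}

Once the commented-out Mockup input at the top of the diff is re-enabled, the hard-coded flag would presumably give way to the commented line already present in the diff, var mockup = Mockup.Get(workflowContext);.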
diff --git a/skills/DevLead.cs b/skills/DevLead.cs
index 9ef4b92a4..583e024b7 100644
--- a/skills/DevLead.cs
+++ b/skills/DevLead.cs
@@ -9,7 +9,42 @@ public static class DevLead {
     For each step or module, break down the steps or subtasks required to complete that step or module.
     For each subtask, write an LLM prompt that would be used to tell a model to write the code that will accomplish that subtask. If the subtask involves taking action/running commands, tell the model to write the script that will run those commands. In each LLM prompt, restrict the model from outputting any text that is not in the form of code or code comments.
-    Please output a JSON data structure with a list of steps and a description of each step, and the steps or subtasks that each requires, and the LLM prompts for each subtask.
+    Please output a JSON array with a list of steps, including a description of each step, the subtasks each step requires, and the LLM prompt for each subtask.
+    Example:
+    [
+        {
+            "step": "Step 1",
+            "description": "This is the first step",
+            "subtasks": [
+                {
+                    "subtask": "Subtask 1",
+                    "description": "This is the first subtask",
+                    "prompt": "Write the code to do the first subtask"
+                },
+                {
+                    "subtask": "Subtask 2",
+                    "description": "This is the second subtask",
+                    "prompt": "Write the code to do the second subtask"
+                }
+            ]
+        },
+        {
+            "step": "Step 2",
+            "description": "This is the second step",
+            "subtasks": [
+                {
+                    "subtask": "Subtask 1",
+                    "description": "This is the first subtask",
+                    "prompt": "Write the code to do the first subtask"
+                },
+                {
+                    "subtask": "Subtask 2",
+                    "description": "This is the second subtask",
+                    "prompt": "Write the code to do the second subtask"
+                }
+            ]
+        }
+    ]
     Do not output any other text.
     Input: {{$input}}
     {{$wafContext}}
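Reviewer note: pinning the prompt to a concrete JSON shape makes the dev lead's plan mechanically parseable downstream. A minimal sketch of a matching consumer-side model with System.Text.Json follows; the record and property names are assumptions for illustration and are not part of this PR.

using System;
using System.Collections.Generic;
using System.Text.Json;
using System.Text.Json.Serialization;

// Hypothetical model matching the JSON array format requested in the prompt above.
public record SubtaskItem(
    [property: JsonPropertyName("subtask")] string Subtask,
    [property: JsonPropertyName("description")] string Description,
    [property: JsonPropertyName("prompt")] string Prompt);

public record StepItem(
    [property: JsonPropertyName("step")] string Step,
    [property: JsonPropertyName("description")] string Description,
    [property: JsonPropertyName("subtasks")] List<SubtaskItem> Subtasks);

public static class PlanParserDemo
{
    public static void Main()
    {
        // Minimal instance of the schema shown in the prompt's example.
        const string json = """
        [
          {
            "step": "Step 1",
            "description": "This is the first step",
            "subtasks": [
              { "subtask": "Subtask 1", "description": "This is the first subtask", "prompt": "Write the code to do the first subtask" }
            ]
          }
        ]
        """;

        var plan = JsonSerializer.Deserialize<List<StepItem>>(json);
        foreach (var step in plan!)
            Console.WriteLine($"{step.Step}: {step.Subtasks.Count} subtask(s)");
    }
}

System.Text.Json binds the positional record parameters to the annotated properties by name, so Deserialize works here without extra serializer options.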