From 58637028fc8cfb710c62fec1780699c8bfca5653 Mon Sep 17 00:00:00 2001
From: Abel <Segayeabel@gmail.com>
Date: Thu, 10 Oct 2024 18:05:20 +0200
Subject: [PATCH] dbfunction finished

---
 src/context-holder/context.go                 |   4 +-
 .../context-with-indicators.go                | 134 +++++++++++++++
 src/database/db.go                            |   9 +-
 src/extraction/extract.go                     |  93 +++++++++++
 src/loop-implementation/loop.go               |  74 +++++++++
 src/preliminary-implementation/preliminary.go | 157 ++++++++++--------
 6 files changed, 399 insertions(+), 72 deletions(-)
 create mode 100644 src/context-with-indicators/context-with-indicators.go
 create mode 100644 src/extraction/extract.go
 create mode 100644 src/loop-implementation/loop.go

diff --git a/src/context-holder/context.go b/src/context-holder/context.go
index 240e3b4..ae422e6 100644
--- a/src/context-holder/context.go
+++ b/src/context-holder/context.go
@@ -69,8 +69,8 @@ func getOllamaResponse(prompt string, context []int) (string, []int, error) {
 	// Create request payload with the model specified and context
 	requestBody, err := json.Marshal(OllamaRequest{
 		Prompt:  prompt,
-		Model:  "llama3.1",
-		Context: context, // Pass the conversation context
+		Model:   "llama3.1",
+		Context: context, // Pass the conversation context
 	})
 	if err != nil {
 		return "", nil, err
diff --git a/src/context-with-indicators/context-with-indicators.go b/src/context-with-indicators/context-with-indicators.go
new file mode 100644
index 0000000..4d7ea21
--- /dev/null
+++ b/src/context-with-indicators/context-with-indicators.go
@@ -0,0 +1,134 @@
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+)
+
+const ollamaEndpoint = "http://localhost:11434/api/generate" // The local endpoint for the Ollama API
+
+// Struct for request to Ollama API
+type OllamaRequest struct {
+	Prompt  string `json:"prompt"`
+	Model   string `json:"model"`
+	Context []int  `json:"context,omitempty"` // Context to maintain conversation
+}
+
+// Struct for response from Ollama API
+type OllamaResponse struct {
+	Model              string `json:"model"`
+	CreatedAt          string `json:"created_at"`
+	Response           string `json:"response"`
+	Done               bool   `json:"done"`
+	DoneReason         string `json:"done_reason,omitempty"`
+	Context            []int  `json:"context,omitempty"` // Updated context
+	TotalDuration      int64  `json:"total_duration,omitempty"`
+	LoadDuration       int64  `json:"load_duration,omitempty"`
+	PromptEvalCount    int    `json:"prompt_eval_count,omitempty"`
+	PromptEvalDuration int64  `json:"prompt_eval_duration,omitempty"`
+	EvalCount          int    `json:"eval_count,omitempty"`
+	EvalDuration       int64  `json:"eval_duration,omitempty"`
+}
+
+func main() {
+	reader := bufio.NewReader(os.Stdin)
+	var conversationContext []int // Variable to store conversation context
+
+	for {
+		fmt.Print("Enter your prompt (or type 'exit' to quit): ")
+		userPrompt, _ := reader.ReadString('\n')
+		userPrompt = strings.TrimSpace(userPrompt)
+
+		if userPrompt == "exit" {
+			fmt.Println("Exiting the program.")
+			break
+		}
+
+		fmt.Println("Prompt received. Generating response...")
+
+		// Start a goroutine to display a waiting indicator while the response is being generated
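+		// The done channel is unbuffered, so the send after the request
+		// returns blocks until the goroutine receives it and the
+		// animation loop stops.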
+		done := make(chan bool)
+		go displayLoadingIndicator(done)
+
+		// Generate response using Ollama API, passing the context
+		response, updatedContext, err := getOllamaResponse(userPrompt, conversationContext)
+
+		// Signal the waiting indicator to stop
+		done <- true
+
+		if err != nil {
+			fmt.Println("Error generating response:", err)
+			continue
+		}
+
+		// Update the conversation context with the response
+		conversationContext = updatedContext
+
+		fmt.Println("Ollama's response:", response)
+	}
+}
+
+// Function to display a waiting/loading indicator
+func displayLoadingIndicator(done chan bool) {
+	indicator := []string{"|", "/", "-", "\\"}
+	i := 0
+	for {
+		select {
+		case <-done:
+			fmt.Print("\r") // Clear the waiting indicator when done
+			return
+		default:
+			fmt.Printf("\r%s Generating...", indicator[i%len(indicator)])
+			i++
+			time.Sleep(200 * time.Millisecond)
+		}
+	}
+}
+
+// Function to make a POST request to Ollama API
+func getOllamaResponse(prompt string, context []int) (string, []int, error) {
+	// Create request payload with the model specified and context
+	requestBody, err := json.Marshal(OllamaRequest{
+		Prompt:  prompt,
+		Model:   "llama3.1",
+		Context: context, // Pass the conversation context
+	})
+	if err != nil {
+		return "", nil, err
+	}
+
+	// Send HTTP POST request to Ollama API
+	resp, err := http.Post(ollamaEndpoint, "application/json", bytes.NewBuffer(requestBody))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	// Read and accumulate response body in chunks
+	var completeResponse string
+	var updatedContext []int
+	decoder := json.NewDecoder(resp.Body)
+	for decoder.More() {
+		var chunk OllamaResponse
+		if err := decoder.Decode(&chunk); err != nil {
+			return "", nil, err
+		}
+		completeResponse += chunk.Response
+
+		// Capture the updated context from the response
+		updatedContext = chunk.Context
+
+		if chunk.Done {
+			break
+		}
+	}
+
+	return completeResponse, updatedContext, nil
+}
diff --git a/src/database/db.go b/src/database/db.go
index d2b05dc..405848a 100644
--- a/src/database/db.go
+++ b/src/database/db.go
@@ -1,4 +1,4 @@
-package main
+package projectdb
 
 import (
 	"context"
@@ -9,10 +9,11 @@ import (
 	"go.mongodb.org/mongo-driver/mongo/options"
 )
 
 const uri_db = "mongodb://localhost:27017" // The connection string for the MongoDB database
 
-func main() {
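+// WriteDataToDatabase inserts one generation record into the local MongoDB
+// instance. A minimal usage sketch (argument values here are illustrative
+// only, not part of this change):
+//
+//	projectdb.WriteDataToDatabase("initial prompt", "generated code",
+//		"compiler output", "testcases", "2024-10-10T18:05:20Z", "test runtime")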
+func WriteDataToDatabase(InputQuery string, SolutionCode string, CompilerOutput string, TestCases string, Timestamp string, TimeTest string) {
 	// Connect to the MongoDB server
 	clientOptions := options.Client().ApplyURI(uri_db)
 	client, err := mongo.Connect(context.TODO(), clientOptions)
@@ -32,7 +33,7 @@
 	collection := client.Database("db").Collection("test")
 
 	// Need to fix the document so that it can store the initial prompt and the generated code
-	document := map[string]string{"Input query": "Input query", "Solution code": "solution code", "Compiler output": "compiler output", "testcases": "testcases", "timestamp": "timestamp", "timeTest": "timeTest"}
+	document := map[string]string{"Input query": InputQuery, "Solution code": SolutionCode, "Compiler output": CompilerOutput, "testcases": TestCases, "timestamp": Timestamp, "timeTest": TimeTest}
 
 	_, err = collection.InsertOne(context.TODO(), document)
 	if err != nil {
diff --git a/src/extraction/extract.go b/src/extraction/extract.go
new file mode 100644
index 0000000..0e13153
--- /dev/null
+++ b/src/extraction/extract.go
@@ -0,0 +1,93 @@
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+)
+
+const ollamaEndpoint = "http://localhost:11434/api/generate" // The local endpoint for the Ollama API
+
+// Struct for request to Ollama API
+type OllamaRequest struct {
+	Prompt string `json:"prompt"`
+	Model  string `json:"model"`
+}
+
+// Struct for response from Ollama API
+type OllamaResponse struct {
+	Response string `json:"response"`
+	Done     bool   `json:"done"`
+}
+
+func main() {
+	reader := bufio.NewReader(os.Stdin)
+	fmt.Print("Enter your prompt: ")
+	userPrompt, _ := reader.ReadString('\n')
+
+	// Generate response using Ollama API
+	requestBody, err := json.Marshal(OllamaRequest{
+		Prompt: userPrompt,
+		Model:  "llama3.1",
+	})
+	if err != nil {
+		fmt.Println("Error building request:", err)
+		return
+	}
+
+	// Send HTTP POST request to Ollama API
+	resp, err := http.Post(ollamaEndpoint, "application/json", bytes.NewBuffer(requestBody))
+	if err != nil {
+		fmt.Println("Error generating response:", err)
+		return
+	}
+	defer resp.Body.Close()
+
+	// Read and accumulate response body in chunks
+	var completeResponse string
+	decoder := json.NewDecoder(resp.Body)
+	for decoder.More() {
+		var chunk OllamaResponse
+		if err := decoder.Decode(&chunk); err != nil {
+			fmt.Println("Error decoding response:", err)
+			return
+		}
+		completeResponse += chunk.Response
+
+		if chunk.Done {
+			break
+		}
+	}
+
+	//fmt.Println("Ollama's response:", completeResponse)
+	out := extract(completeResponse)
+	fmt.Println(out)
+}
+
+// extract collects all text between ``` fences, dropping the backticks
+// themselves: every third backtick seen toggles whether we are inside a
+// fenced block.
+func extract(output string) string {
+	extracted := ""
+	count := 0     // backticks seen since the last fence toggle
+	inside := false // whether we are currently between fences
+	for _, r := range output {
+		if r == '`' {
+			count++
+			if count == 3 {
+				inside = !inside
+				count = 0
+			}
+		} else if inside {
+			extracted += string(r)
+		}
+	}
+	return extracted
+}
diff --git a/src/loop-implementation/loop.go b/src/loop-implementation/loop.go
new file mode 100644
index 0000000..179c63e
--- /dev/null
+++ b/src/loop-implementation/loop.go
@@ -0,0 +1,74 @@
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+	"strings"
+)
+
+const ollamaEndpoint = "http://localhost:11434/api/generate" // The local endpoint for the Ollama API
+
+// Struct for request to Ollama API
+type OllamaRequest struct {
+	Prompt string `json:"prompt"`
+	Model  string `json:"model"`
+}
+
+// Struct for response from Ollama API
+type OllamaResponse struct {
+	Response string `json:"response"`
+	Done     bool   `json:"done"`
+}
+
+func main() {
+	reader := bufio.NewReader(os.Stdin)
+
+	for {
+		fmt.Print("Enter your prompt: ")
+		userPrompt, _ := reader.ReadString('\n')
+
+		// ReadString keeps the trailing newline, so trim before comparing
+		if strings.TrimSpace(userPrompt) == "exit" {
+			fmt.Println("Exiting the program.")
+			break
+		}
+
+		// Generate response using Ollama API
+		requestBody, err := json.Marshal(OllamaRequest{
+			Prompt: userPrompt,
+			Model:  "llama3.1",
+		})
+		if err != nil {
+			fmt.Println("Error building request:", err)
+			return
+		}
+
+		// Send HTTP POST request to Ollama API
+		resp, err := http.Post(ollamaEndpoint, "application/json", bytes.NewBuffer(requestBody))
+		if err != nil {
+			fmt.Println("Error generating response:", err)
+			return
+		}
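+
+		// Ollama streams its reply as newline-delimited JSON: each object
+		// carries a fragment of the text in "response", and the final one
+		// sets "done" to true, so the loop below accumulates fragments
+		// until that flag appears.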
+		// Read and accumulate response body in chunks
+		var completeResponse string
+		decoder := json.NewDecoder(resp.Body)
+		for decoder.More() {
+			var chunk OllamaResponse
+			if err := decoder.Decode(&chunk); err != nil {
+				fmt.Println("Error decoding response:", err)
+				break
+			}
+			completeResponse += chunk.Response
+
+			if chunk.Done {
+				break
+			}
+		}
+		// Close the body explicitly here: a defer inside the loop would
+		// pile up one pending close per iteration until main returns.
+		resp.Body.Close()
+
+		fmt.Println("Ollama's response:", completeResponse)
+	}
+}
diff --git a/src/preliminary-implementation/preliminary.go b/src/preliminary-implementation/preliminary.go
index 75423c5..07505d4 100644
--- a/src/preliminary-implementation/preliminary.go
+++ b/src/preliminary-implementation/preliminary.go
@@ -1,66 +1,91 @@
-package main
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"os"
-)
-
-const ollamaEndpoint = "http://localhost:11434/api/generate" // The local endpoint for the Ollama API
-
-// Struct for request to Ollama API
-type OllamaRequest struct {
-	Prompt string `json:"prompt"`
-	Model  string `json:"model"`
-}
-
-// Struct for response from Ollama API
-type OllamaResponse struct {
-	Response string `json:"response"`
-	Done     bool   `json:"done"`
-}
-
-func main() {
-	reader := bufio.NewReader(os.Stdin)
-	fmt.Print("Enter your prompt: ")
-	userPrompt, _ := reader.ReadString('\n')
-
-	// Generate response using Ollama API
-
-	requestBody, err := json.Marshal(OllamaRequest{
-		Prompt: userPrompt,
-		Model:  "llama3.1",
-	})
-	if err != nil {
-		fmt.Println("Error generating response:", err)
-		return
-	}
-
-	// Send HTTP POST request to Ollama API
-	resp, err := http.Post(ollamaEndpoint, "application/json", bytes.NewBuffer(requestBody))
-	if err != nil {
-		fmt.Println("Error generating response:", err)
-		return
-	}
-	defer resp.Body.Close()
-
-	// Read and accumulate response body in chunks
-	var completeResponse string
-	decoder := json.NewDecoder(resp.Body)
-	for decoder.More() {
-		var chunk OllamaResponse
-		if err := decoder.Decode(&chunk); err != nil {
-			return
-		}
-		completeResponse += chunk.Response
-
-		if chunk.Done {
-			break
-		}
-	}
-
-	fmt.Println("Ollama's response:", completeResponse)
-}
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+	"strings"
+)
+
+const ollamaEndpoint = "http://localhost:11434/api/generate" // The local endpoint for the Ollama API
+
+// Struct for request to Ollama API
+type OllamaRequest struct {
+	Prompt string `json:"prompt"`
+	Model  string `json:"model"`
+}
+
+// Struct for response from Ollama API
+type OllamaResponse struct {
+	Response string `json:"response"`
+	Done     bool   `json:"done"`
+}
+
+func main() {
+	reader := bufio.NewReader(os.Stdin)
+	fmt.Print("Enter your prompt: ")
+	userPrompt, _ := reader.ReadString('\n')
+	userPrompt = strings.TrimSpace(userPrompt) // drop the trailing newline before appending
+
+	// Modify the user prompt to request the code in Rust or Golang;
+	// hard-coded to Rust for now.
+	var userPromptModified string
+	if true {
+		userPromptModified = userPrompt + " and make the code in rust"
+	} else {
+		userPromptModified = userPrompt + " and make the code in golang"
+	}
+	fmt.Println(userPromptModified)
+
+	// Generate response using Ollama API
+	requestBody, err := json.Marshal(OllamaRequest{
+		Prompt: userPromptModified,
+		Model:  "llama3.1",
+	})
+	if err != nil {
+		fmt.Println("Error building request:", err)
+		return
+	}
+
+	// Send HTTP POST request to Ollama API
+	resp, err := http.Post(ollamaEndpoint, "application/json", bytes.NewBuffer(requestBody))
+	if err != nil {
+		fmt.Println("Error generating response:", err)
+		return
+	}
+	defer resp.Body.Close()
+
+	// Read and accumulate response body in chunks
+	var completeResponse string
+	decoder := json.NewDecoder(resp.Body)
+	for decoder.More() {
+		var chunk OllamaResponse
+		if err := decoder.Decode(&chunk); err != nil {
+			fmt.Println("Error decoding response:", err)
+			return
+		}
+		completeResponse += chunk.Response
+
+		if chunk.Done {
+			break
+		}
+	}
+
+	fmt.Println("Ollama's response:", completeResponse)
+	// var out = extract(completeResponse)
+	// fmt.Println(out)
+}
+
+// extract returns everything between the first pair of ``` fences.
+// Will be used once we decide how to strip the explanation text from the code.
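+// For example, given "Here you go:\n```rust\nfn main() {}\n```", it returns
+// "\nfn main() {}\n" with the language tag trimmed. This assumes the model
+// emits exactly one fenced code block.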
+func extract(output string) string {
+	parts := strings.Split(output, "```")
+	if len(parts) < 2 {
+		return "" // no fenced code block in the output
+	}
+	extracted := parts[1]
+	// TrimPrefix removes only the exact language tag; TrimLeft would treat
+	// "rust" as a character set and eat leading letters of the code itself.
+	if strings.HasPrefix(extracted, "rust") {
+		extracted = strings.TrimPrefix(extracted, "rust")
+	} else {
+		extracted = strings.TrimPrefix(extracted, "go")
+	}
+	return extracted
+}
-- 
GitLab