package main

// Integration with the Ollama local AI API. It allows the editor to check AI
// availability and generate text completions.

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/nsf/termbox-go"
)

// OllamaClient handles HTTP requests to the Ollama server.
type OllamaClient struct {
	IsOnline bool   // Last observed availability of the local LLM; updated by CheckStatus.
	URL      string // Full /api/tags endpoint polled for status checks.
}

// GenerateRequest defines the payload for text generation.
type GenerateRequest struct {
	Model  string `json:"model"`
	Prompt string `json:"prompt"`
	Stream bool   `json:"stream"`
}
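
// A marshaled request body looks like the following (the model name comes
// from Config.OllamaModel; "llama3" here is just an illustrative value):
//
//	{"model":"llama3","prompt":"Write a haiku","stream":false}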

// GenerateResponse defines the server's reply for text generation.
type GenerateResponse struct {
	Model    string `json:"model"`
	Response string `json:"response"`
	Done     bool   `json:"done"`
}
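
// Ollama's non-streaming reply carries additional fields (created_at, timing
// stats, context tokens, and so on); json.Unmarshal simply ignores fields not
// declared in the struct, so only what the editor actually needs is kept.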

// NewOllamaClient initializes the client with the configured URL. The
// /api/tags endpoint (Ollama's installed-model list) serves as a lightweight
// health check: it responds quickly and requires no model to be loaded.
func NewOllamaClient() *OllamaClient {
	return &OllamaClient{
		IsOnline: false,
		URL:      fmt.Sprintf("%s/api/tags", Config.OllamaURL),
	}
}

// PeriodicStatusCheck starts a background goroutine that polls AI availability
// every Config.OllamaCheckInterval. It runs for the lifetime of the editor.
func (c *OllamaClient) PeriodicStatusCheck() {
	go func() {
		for {
			prevStatus := c.IsOnline
			currentStatus := c.CheckStatus()
			// If the status changed, wake the event loop so the UI can
			// refresh the AI indicator.
			if prevStatus != currentStatus {
				termbox.Interrupt()
			}
			time.Sleep(Config.OllamaCheckInterval)
		}
	}()
}
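
// termbox.Interrupt makes a blocked termbox.PollEvent return an event with
// Type == termbox.EventInterrupt. A sketch of how the editor's main loop
// might consume it (the real loop lives elsewhere; redrawUI is hypothetical):
//
//	switch ev := termbox.PollEvent(); ev.Type {
//	case termbox.EventInterrupt:
//		redrawUI() // repaint, including the AI status indicator
//	}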

// CheckStatus pings the Ollama server to see if it responds.
func (c *OllamaClient) CheckStatus() bool {
	client := http.Client{
		Timeout: 1 * time.Second, // Fail fast if the server is down.
	}

	resp, err := client.Get(c.URL)
	if err != nil {
		c.IsOnline = false
		return false
	}
	defer resp.Body.Close()

	c.IsOnline = resp.StatusCode == http.StatusOK
	return c.IsOnline
}
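
// Note: IsOnline is written by the checker goroutine and may be read by the
// draw loop on another goroutine, which is a data race under the Go memory
// model. A race-free variant (a sketch, not wired in here) would store the
// flag atomically via sync/atomic:
//
//	var online atomic.Bool // would replace the IsOnline field
//	online.Store(resp.StatusCode == http.StatusOK)
//	isUp := online.Load()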

// Generate sends a prompt to the LLM and returns the generated text.
func (c *OllamaClient) Generate(prompt string) (string, error) {
	url := fmt.Sprintf("%s/api/generate", Config.OllamaURL)
	reqBody := GenerateRequest{
		Model:  Config.OllamaModel,
		Prompt: prompt,
		Stream: false, // Request the full result at once for simpler handling.
	}

	jsonData, err := json.Marshal(reqBody)
	if err != nil {
		return "", err
	}

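	// Note: http.Post uses http.DefaultClient, which applies no timeout, so a
	// long generation will simply block. Callers needing cancellation could
	// build an *http.Request with a context instead (not done here).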
	resp, err := http.Post(url, "application/json", bytes.NewBuffer(jsonData))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("ollama error: %s", resp.Status)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	var genResp GenerateResponse
	if err := json.Unmarshal(body, &genResp); err != nil {
		return "", err
	}

	return genResp.Response, nil
}
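
// Typical wiring from the editor (a sketch; assumes Config has been loaded
// and insertAtCursor is a hypothetical editor helper):
//
//	ai := NewOllamaClient()
//	ai.PeriodicStatusCheck() // keeps the status-bar indicator fresh
//	if ai.CheckStatus() {
//		completion, err := ai.Generate("Continue this text: ...")
//		if err == nil {
//			insertAtCursor(completion)
//		}
//	}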