import os

import requests


class OllamaGenerator:
    """Generate Pelican-ready markdown for an article via a local Ollama
    server's ``/api/generate`` endpoint.

    The server location is read from the ``OLLAMA_PROTOCOL``,
    ``OLLAMA_HOST`` and ``OLLAMA_PORT`` environment variables; the model
    name comes from the ``model`` argument or ``OLLAMA_MODEL``.
    """

    def __init__(self, title: str, content: str, model: str = None):
        """
        Args:
            title: Article title, embedded in the prompt.
            content: Raw article content sent to the model.
            model: Ollama model name. Defaults to ``$OLLAMA_MODEL``,
                falling back to ``"mistral"``.
                NOTE(review): confirm the intended default model.
        """
        self.title = title
        self.content = content
        # BUG FIX: /api/generate requires a "model" field; the original
        # request omitted it and Ollama would reject the call. Kept
        # backward-compatible via an optional parameter + env default.
        self.model = model or os.getenv("OLLAMA_MODEL", "mistral")
        self.ollama_url = (
            f"{os.getenv('OLLAMA_PROTOCOL')}://"
            f"{os.getenv('OLLAMA_HOST')}:{os.getenv('OLLAMA_PORT')}/api/generate"
        )

    def generate_markdown(self) -> str:
        """Call Ollama and return the generated markdown text.

        Returns:
            The model's generated text (the ``"response"`` field of the
            Ollama reply).

        Raises:
            Exception: if the HTTP status is not 200; the message
                includes the response body.
        """
        data = {
            "model": self.model,
            "prompt": f"Title: {self.title}\n\nContent:\n{self.content}",
            # BUG FIX: /api/generate streams NDJSON by default; request a
            # single JSON object so response.json() parses cleanly.
            "stream": False,
            # Sampling parameters live under "options" in the Ollama API,
            # not at the top level of the request body; "max_tokens" is
            # OpenAI naming — Ollama calls it "num_predict".
            "options": {
                "num_predict": 1000,
                "temperature": 0.7,
                "top_p": 0.9,
                "presence_penalty": 0,
                "frequency_penalty": 0,
            },
        }

        # Timeout so a hung or unreachable server cannot block forever.
        response = requests.post(self.ollama_url, json=data, timeout=120)
        if response.status_code == 200:
            # BUG FIX: Ollama returns {"response": "..."} — there is no
            # OpenAI-style "choices" array in its reply, so the original
            # lookup always raised KeyError on success.
            return response.json()["response"]
        else:
            raise Exception(f"Failed to generate markdown: {response.text}")

    def save_to_file(self, filename: str) -> None:
        """Generate the markdown and write it to *filename* as UTF-8."""
        # Explicit encoding: the default is platform-dependent, and the
        # model output may contain non-ASCII characters.
        with open(filename, "w", encoding="utf-8") as f:
            f.write(self.generate_markdown())