From bce439921fc537bc59654ce025595dda2b199992 Mon Sep 17 00:00:00 2001
From: armistace
Date: Mon, 16 Jun 2025 10:35:21 +1000
Subject: [PATCH] Generate tags around context

---
 src/ai_generators/ollama_md_generator.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/ai_generators/ollama_md_generator.py b/src/ai_generators/ollama_md_generator.py
index 85db5db..7dd1653 100644
--- a/src/ai_generators/ollama_md_generator.py
+++ b/src/ai_generators/ollama_md_generator.py
@@ -33,7 +33,7 @@ class OllamaGenerator:
                         The title for the blog is {self.inner_title}.
                         Do not output the title in the markdown.
                         The basis for the content of the blog is:
-                        {self.content}
+                        <context>{self.content}</context>
                     """

     def split_into_chunks(self, text, chunk_size=100):
@@ -130,7 +130,7 @@ class OllamaGenerator:
                         The title for the blog is {self.inner_title}.
                         Do not output the title in the markdown. Avoid repeated sentences
                         The basis for the content of the blog is:
-                        {self.content}
+                        <context>{self.content}</context>
                     """
         try:
             query_embed = self.ollama_client.embed(model=self.embed_model, input=prompt_system)['embeddings']
@@ -139,7 +139,7 @@ class OllamaGenerator:
            print("Showing pertinent info from drafts used in final edited edition")
            pertinent_draft_info = '\n\n'.join(collection.query(query_embeddings=query_embed, n_results=100)['documents'][0])
            #print(pertinent_draft_info)
-            prompt_human = f"""Generate the final, 3000 word, draft of the blog using this information from the drafts: {pertinent_draft_info}
+            prompt_human = f"""Generate the final, 3000 word, draft of the blog using this information from the drafts: <context>{pertinent_draft_info}</context>
            - Only output in markdown, do not wrap in markdown tags, Only provide the draft not a commentary on the drafts in the context
            """
            print("Generating final document")