Compare commits

...

15 Commits

SHA1        Message                                                                                        Date
f2b95935bb  Prompt enhancement to produce content                                                          2025-06-24 13:05:15 +10:00
bce439921f  Generate tags around context                                                                   2025-06-16 10:35:21 +10:00
2de2d0fe3a  Merge pull request 'prompt enhancement' () from prompt_fix into master                         2025-06-06 12:04:44 +10:00
cf795bbc35  prompt enhancement                                                                             2025-06-06 12:04:19 +10:00
a6ed20451a  Merge pull request 'pipeline_creation' () from pipeline_creation into master                   2025-06-05 09:22:50 +10:00
2abc39e3ac  Merge pull request 'pipeline_creation' () from pipeline_creation into master                   2025-06-05 08:43:23 +10:00
0594ea54aa  remove repo reference                                                                          2025-06-05 01:02:42 +10:00
60f7473297  remove trailing slash                                                                          2025-06-05 01:00:58 +10:00
ec69e8e4f7  Merge pull request 'do it right' () from pipeline_creation into master                         2025-06-05 00:47:05 +10:00
41f804a1eb  Merge pull request 'pipeline_creation' () from pipeline_creation into master                   2025-06-05 00:45:53 +10:00
e3262cd366  Merge pull request 'weird trailing newline"' () from pipeline_creation into master             2025-06-05 00:12:04 +10:00
e2c29204fa  Merge pull request 'pipeline_creation' () from pipeline_creation into master                   2025-06-04 23:48:10 +10:00
44b5ea6a68  Merge pull request 'load_dotenv work different?' () from pipeline_creation into master         2025-06-04 22:55:26 +10:00
9296fda390  Merge pull request 'tail the .env so we can see it in pipelin' () from pipeline_creation into master   2025-06-04 22:44:14 +10:00
703a2384e7  Merge pull request 'sigh stray U' () from pipeline_creation into master                        2025-06-04 22:30:36 +10:00

@@ -33,7 +33,7 @@ class OllamaGenerator:
 The title for the blog is {self.inner_title}.
 Do not output the title in the markdown.
 The basis for the content of the blog is:
-{self.content}
+<blog>{self.content}</blog>
 """
 def split_into_chunks(self, text, chunk_size=100):
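The change above wraps the source material in explicit <blog> tags so the model can tell the raw notes apart from the surrounding instructions. A minimal sketch of how such a tagged prompt is assembled, assuming inner_title and content are the attributes visible in the diff; the standalone function is illustrative, not the repo's code:

def build_draft_prompt(inner_title: str, content: str) -> str:
    # Tag the raw blog notes so the model treats them as source material,
    # not as further instructions to follow.
    return f"""
    The title for the blog is {inner_title}.
    Do not output the title in the markdown.
    The basis for the content of the blog is:
    <blog>{content}</blog>
    """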
@@ -125,11 +125,13 @@ class OllamaGenerator:
 writing a 3000 word blog article. You like when they use almost no code examples.
 You are also Australian. The content may have light comedic elements,
 you are more professional and will attempt to tone these down
-As this person produce and an amalgamtion of this blog as a markdown document.
+As this person produce the final version of this blog as a markdown document
+keeping in mind the context provided by the previous drafts.
+You are to produce the content not placeholders for further editors
 The title for the blog is {self.inner_title}.
 Do not output the title in the markdown. Avoid repeated sentences
 The basis for the content of the blog is:
-{self.content}
+<blog>{self.content}</blog>
 """
 try:
     query_embed = self.ollama_client.embed(model=self.embed_model, input=prompt_system)['embeddings']
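The hunk above embeds the editor-persona prompt and, in the next hunk, uses that embedding to pull the most relevant draft chunks out of the vector store. A rough, self-contained sketch of that retrieval step, assuming a local Ollama server and a ChromaDB collection of draft chunks; the host, model, and collection names are placeholders, not values taken from this repo:

import chromadb
from ollama import Client

ollama_client = Client(host="http://localhost:11434")        # placeholder host
chroma = chromadb.HttpClient(host="localhost", port=8000)    # placeholder store
collection = chroma.get_or_create_collection(name="blog_drafts")  # hypothetical name

prompt_system = "You are an editor assembling the final draft of a blog post."  # stand-in prompt

# Embed the editor prompt, then fetch the draft chunks most relevant to it.
query_embed = ollama_client.embed(model="mxbai-embed-large", input=prompt_system)["embeddings"]
pertinent_draft_info = "\n\n".join(
    collection.query(query_embeddings=query_embed, n_results=100)["documents"][0]
)
print(pertinent_draft_info)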
@@ -138,7 +140,9 @@
 print("Showing pertinent info from drafts used in final edited edition")
 pertinent_draft_info = '\n\n'.join(collection.query(query_embeddings=query_embed, n_results=100)['documents'][0])
 #print(pertinent_draft_info)
-prompt_human = f"Generate the final document using this information from the drafts: {pertinent_draft_info} - Only output in markdown, do not wrap in markdown tags"
+prompt_human = f"""Generate the final, 3000 word, draft of the blog using this information from the drafts: <context>{pertinent_draft_info}</context>
+- Only output in markdown, do not wrap in markdown tags, Only provide the draft not a commentary on the drafts in the context
+"""
 print("Generating final document")
 messages = [("system", prompt_system), ("human", prompt_human),]
 self.response = self.llm.invoke(messages).text()
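Put together, the final step builds a system/human message pair and asks the chat model for the finished markdown. A sketch of that call, assuming self.llm is a LangChain chat model backed by Ollama; langchain_ollama and the model name are assumptions, since the diff only shows the invoke call:

from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3.1", temperature=0.6)  # assumed backend; not shown in this diff

prompt_system = "You are an editor assembling the final draft of a blog post."  # stand-in
pertinent_draft_info = "chunks returned by the ChromaDB query sketched earlier"  # stand-in

# Wrap the retrieved context in <context> tags, mirroring the prompt change above.
prompt_human = f"""Generate the final, 3000 word, draft of the blog using this
information from the drafts: <context>{pertinent_draft_info}</context>
- Only output in markdown, do not wrap in markdown tags. Only provide the draft,
not a commentary on the drafts in the context."""

messages = [("system", prompt_system), ("human", prompt_human)]
response = llm.invoke(messages)
print(response.text())  # .text() mirrors the call in the diff; .content also works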